/*
 * Intel SST Firmware Loader
 *
 * Copyright (C) 2013, Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/firmware.h>
#include <linux/export.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/pci.h>
#include <linux/acpi.h>

/* supported DMA engine drivers */
#include <linux/dma/dw.h>

#include <asm/page.h>
#include <asm/pgtable.h>

#include "sst-dsp.h"
#include "sst-dsp-priv.h"

#define SST_DMA_RESOURCES	2
#define SST_DSP_DMA_MAX_BURST	0x3
#define SST_HSW_BLOCK_ANY	0xffffffff

#define SST_HSW_MASK_DMA_ADDR_DSP 0xfff00000

struct sst_dma {
	struct sst_dsp *sst;

	struct dw_dma_chip *chip;

	struct dma_async_tx_descriptor *desc;
	struct dma_chan *ch;
};

static inline void sst_memcpy32(volatile void __iomem *dest, void *src, u32 bytes)
{
	/* __iowrite32_copy takes a count of 32-bit words, so divide by 4 */
	__iowrite32_copy((void *)dest, src, bytes / 4);
}

static void sst_dma_transfer_complete(void *arg)
{
	struct sst_dsp *sst = (struct sst_dsp *)arg;

	dev_dbg(sst->dev, "DMA: callback\n");
}

static int sst_dsp_dma_copy(struct sst_dsp *sst, dma_addr_t dest_addr,
	dma_addr_t src_addr, size_t size)
{
	struct dma_async_tx_descriptor *desc;
	struct sst_dma *dma = sst->dma;

	if (dma->ch == NULL) {
		dev_err(sst->dev, "error: no DMA channel\n");
		return -ENODEV;
	}

	dev_dbg(sst->dev, "DMA: src: 0x%lx dest 0x%lx size %zu\n",
		(unsigned long)src_addr, (unsigned long)dest_addr, size);

	desc = dma->ch->device->device_prep_dma_memcpy(dma->ch, dest_addr,
		src_addr, size, DMA_CTRL_ACK);
	if (!desc) {
		dev_err(sst->dev, "error: dma prep memcpy failed\n");
		return -EINVAL;
	}

	desc->callback = sst_dma_transfer_complete;
	desc->callback_param = sst;

	desc->tx_submit(desc);
	dma_wait_for_async_tx(desc);

	return 0;
}

/* copy to DSP */
int sst_dsp_dma_copyto(struct sst_dsp *sst, dma_addr_t dest_addr,
	dma_addr_t src_addr, size_t size)
{
	return sst_dsp_dma_copy(sst, dest_addr | SST_HSW_MASK_DMA_ADDR_DSP,
			src_addr, size);
}
EXPORT_SYMBOL_GPL(sst_dsp_dma_copyto);

/* copy from DSP */
int sst_dsp_dma_copyfrom(struct sst_dsp *sst, dma_addr_t dest_addr,
	dma_addr_t src_addr, size_t size)
{
	return sst_dsp_dma_copy(sst, dest_addr,
			src_addr | SST_HSW_MASK_DMA_ADDR_DSP, size);
}
EXPORT_SYMBOL_GPL(sst_dsp_dma_copyfrom);
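/*
 * Note on SST_HSW_MASK_DMA_ADDR_DSP above: OR-ing the DSP-side address
 * with the mask selects the ADSP memory window in the DMA engine's
 * address space. Illustrative arithmetic only (the low bits here are
 * hypothetical):
 *
 *	0x00008000 | 0xfff00000 == 0xfff08000
 */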
/* remove module from memory - callers hold locks */
static void block_list_remove(struct sst_dsp *dsp,
	struct list_head *block_list)
{
	struct sst_mem_block *block, *tmp;
	int err;

	/* disable each block */
	list_for_each_entry(block, block_list, module_list) {

		if (block->ops && block->ops->disable) {
			err = block->ops->disable(block);
			if (err < 0)
				dev_err(dsp->dev,
					"error: can't disable block %d:%d\n",
					block->type, block->index);
		}
	}

	/* mark each block as free */
	list_for_each_entry_safe(block, tmp, block_list, module_list) {
		list_del(&block->module_list);
		list_move(&block->list, &dsp->free_block_list);
		dev_dbg(dsp->dev, "block freed %d:%d at offset 0x%x\n",
			block->type, block->index, block->offset);
	}
}

/* prepare the memory block to receive data from host - callers hold locks */
static int block_list_prepare(struct sst_dsp *dsp,
	struct list_head *block_list)
{
	struct sst_mem_block *block;
	int ret = 0;

	/* enable each block so that it's ready for data */
	list_for_each_entry(block, block_list, module_list) {

		if (block->ops && block->ops->enable && !block->users) {
			ret = block->ops->enable(block);
			if (ret < 0) {
				dev_err(dsp->dev,
					"error: can't enable block %d:%d\n",
					block->type, block->index);
				goto err;
			}
		}
	}
	return ret;

err:
	list_for_each_entry(block, block_list, module_list) {
		if (block->ops && block->ops->disable)
			block->ops->disable(block);
	}
	return ret;
}

static struct dw_dma_chip *dw_probe(struct device *dev, struct resource *mem,
	int irq)
{
	struct dw_dma_chip *chip;
	int err;

	chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return ERR_PTR(-ENOMEM);

	chip->irq = irq;
	chip->regs = devm_ioremap_resource(dev, mem);
	if (IS_ERR(chip->regs))
		return ERR_CAST(chip->regs);

	err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(31));
	if (err)
		return ERR_PTR(err);

	chip->dev = dev;

	err = dw_dma_probe(chip, NULL);
	if (err)
		return ERR_PTR(err);

	return chip;
}

static void dw_remove(struct dw_dma_chip *chip)
{
	dw_dma_remove(chip);
}

static bool dma_chan_filter(struct dma_chan *chan, void *param)
{
	struct sst_dsp *dsp = (struct sst_dsp *)param;

	return chan->device->dev == dsp->dma_dev;
}

int sst_dsp_dma_get_channel(struct sst_dsp *dsp, int chan_id)
{
	struct sst_dma *dma = dsp->dma;
	struct dma_slave_config slave;
	dma_cap_mask_t mask;
	int ret;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	dma_cap_set(DMA_MEMCPY, mask);

	dma->ch = dma_request_channel(mask, dma_chan_filter, dsp);
	if (dma->ch == NULL) {
		dev_err(dsp->dev, "error: DMA request channel failed\n");
		return -EIO;
	}

	memset(&slave, 0, sizeof(slave));
	slave.direction = DMA_MEM_TO_DEV;
	slave.src_addr_width =
		slave.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	slave.src_maxburst = slave.dst_maxburst = SST_DSP_DMA_MAX_BURST;

	ret = dmaengine_slave_config(dma->ch, &slave);
	if (ret) {
		dev_err(dsp->dev, "error: unable to set DMA slave config %d\n",
			ret);
		dma_release_channel(dma->ch);
		dma->ch = NULL;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(sst_dsp_dma_get_channel);

void sst_dsp_dma_put_channel(struct sst_dsp *dsp)
{
	struct sst_dma *dma = dsp->dma;

	if (!dma->ch)
		return;

	dma_release_channel(dma->ch);
	dma->ch = NULL;
}
EXPORT_SYMBOL_GPL(sst_dsp_dma_put_channel);
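/*
 * Typical channel usage, mirroring the callers later in this file
 * (sketch only; error handling trimmed, offset and firmware addresses
 * hypothetical):
 *
 *	ret = sst_dsp_dma_get_channel(dsp, 0);
 *	if (ret < 0)
 *		return ret;
 *	ret = sst_dsp_dma_copyto(dsp, dsp->addr.lpe_base + offset,
 *				 fw_paddr, fw_size);
 *	sst_dsp_dma_put_channel(dsp);
 */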
int sst_dma_new(struct sst_dsp *sst)
{
	struct sst_pdata *sst_pdata = sst->pdata;
	struct sst_dma *dma;
	struct resource mem;
	const char *dma_dev_name;
	int ret = 0;

	if (sst->pdata->resindex_dma_base == -1)
		/* DMA is not used, return and squelch error messages */
		return 0;

	/* configure the correct platform data for whatever DMA engine
	 * is attached to the ADSP IP. */
	switch (sst->pdata->dma_engine) {
	case SST_DMA_TYPE_DW:
		dma_dev_name = "dw_dmac";
		break;
	default:
		dev_err(sst->dev, "error: invalid DMA engine %d\n",
			sst->pdata->dma_engine);
		return -EINVAL;
	}

	dma = devm_kzalloc(sst->dev, sizeof(struct sst_dma), GFP_KERNEL);
	if (!dma)
		return -ENOMEM;

	dma->sst = sst;

	memset(&mem, 0, sizeof(mem));

	mem.start = sst->addr.lpe_base + sst_pdata->dma_base;
	mem.end   = sst->addr.lpe_base + sst_pdata->dma_base + sst_pdata->dma_size - 1;
	mem.flags = IORESOURCE_MEM;

	/* now register DMA engine device */
	dma->chip = dw_probe(sst->dma_dev, &mem, sst_pdata->irq);
	if (IS_ERR(dma->chip)) {
		dev_err(sst->dev, "error: DMA device register failed\n");
		ret = PTR_ERR(dma->chip);
		goto err_dma_dev;
	}

	sst->dma = dma;
	sst->fw_use_dma = true;
	return 0;

err_dma_dev:
	devm_kfree(sst->dev, dma);
	return ret;
}
EXPORT_SYMBOL(sst_dma_new);

void sst_dma_free(struct sst_dma *dma)
{

	if (dma == NULL)
		return;

	if (dma->ch)
		dma_release_channel(dma->ch);

	if (dma->chip)
		dw_remove(dma->chip);

}
EXPORT_SYMBOL(sst_dma_free);

/* create new generic firmware object */
struct sst_fw *sst_fw_new(struct sst_dsp *dsp,
	const struct firmware *fw, void *private)
{
	struct sst_fw *sst_fw;
	int err;

	if (!dsp->ops->parse_fw)
		return NULL;

	sst_fw = kzalloc(sizeof(*sst_fw), GFP_KERNEL);
	if (sst_fw == NULL)
		return NULL;

	sst_fw->dsp = dsp;
	sst_fw->private = private;
	sst_fw->size = fw->size;

	/* allocate DMA buffer to store FW data */
	sst_fw->dma_buf = dma_alloc_coherent(dsp->dma_dev, sst_fw->size,
				&sst_fw->dmable_fw_paddr, GFP_DMA | GFP_KERNEL);
	if (!sst_fw->dma_buf) {
		dev_err(dsp->dev, "error: DMA alloc failed\n");
		kfree(sst_fw);
		return NULL;
	}

	/* copy FW data to DMA-able memory */
	memcpy((void *)sst_fw->dma_buf, (void *)fw->data, fw->size);

	if (dsp->fw_use_dma) {
		err = sst_dsp_dma_get_channel(dsp, 0);
		if (err < 0)
			goto chan_err;
	}

	/* call core specific FW parser to load FW data into DSP */
	err = dsp->ops->parse_fw(sst_fw);
	if (err < 0) {
		dev_err(dsp->dev, "error: parse fw failed %d\n", err);
		goto parse_err;
	}

	if (dsp->fw_use_dma)
		sst_dsp_dma_put_channel(dsp);

	mutex_lock(&dsp->mutex);
	list_add(&sst_fw->list, &dsp->fw_list);
	mutex_unlock(&dsp->mutex);

	return sst_fw;

parse_err:
	if (dsp->fw_use_dma)
		sst_dsp_dma_put_channel(dsp);
chan_err:
	dma_free_coherent(dsp->dma_dev, sst_fw->size,
				sst_fw->dma_buf,
				sst_fw->dmable_fw_paddr);
	sst_fw->dma_buf = NULL;
	kfree(sst_fw);
	return NULL;
}
EXPORT_SYMBOL_GPL(sst_fw_new);

int sst_fw_reload(struct sst_fw *sst_fw)
{
	struct sst_dsp *dsp = sst_fw->dsp;
	int ret;

	dev_dbg(dsp->dev, "reloading firmware\n");

	/* call core specific FW parser to load FW data into DSP */
	ret = dsp->ops->parse_fw(sst_fw);
	if (ret < 0)
		dev_err(dsp->dev, "error: parse fw failed %d\n", ret);

	return ret;
}
EXPORT_SYMBOL_GPL(sst_fw_reload);
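/*
 * A platform driver would typically build the firmware object from a
 * request_firmware() image (sketch; "sst-fw.bin" and the NULL private
 * pointer are hypothetical):
 *
 *	const struct firmware *fw;
 *	struct sst_fw *sst_fw;
 *
 *	ret = request_firmware(&fw, "sst-fw.bin", dsp->dev);
 *	if (ret < 0)
 *		return ret;
 *	sst_fw = sst_fw_new(dsp, fw, NULL);
 *	release_firmware(fw);
 *	if (sst_fw == NULL)
 *		return -ENOMEM;
 */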
"unloading firmware\n"); 415 416 mutex_lock(&dsp->mutex); 417 418 /* check module by module */ 419 list_for_each_entry_safe(module, mtmp, &dsp->module_list, list) { 420 if (module->sst_fw == sst_fw) { 421 422 /* remove runtime modules */ 423 list_for_each_entry_safe(runtime, rtmp, &module->runtime_list, list) { 424 425 block_list_remove(dsp, &runtime->block_list); 426 list_del(&runtime->list); 427 kfree(runtime); 428 } 429 430 /* now remove the module */ 431 block_list_remove(dsp, &module->block_list); 432 list_del(&module->list); 433 kfree(module); 434 } 435 } 436 437 /* remove all scratch blocks */ 438 block_list_remove(dsp, &dsp->scratch_block_list); 439 440 mutex_unlock(&dsp->mutex); 441} 442EXPORT_SYMBOL_GPL(sst_fw_unload); 443 444/* free single firmware object */ 445void sst_fw_free(struct sst_fw *sst_fw) 446{ 447 struct sst_dsp *dsp = sst_fw->dsp; 448 449 mutex_lock(&dsp->mutex); 450 list_del(&sst_fw->list); 451 mutex_unlock(&dsp->mutex); 452 453 if (sst_fw->dma_buf) 454 dma_free_coherent(dsp->dma_dev, sst_fw->size, sst_fw->dma_buf, 455 sst_fw->dmable_fw_paddr); 456 kfree(sst_fw); 457} 458EXPORT_SYMBOL_GPL(sst_fw_free); 459 460/* free all firmware objects */ 461void sst_fw_free_all(struct sst_dsp *dsp) 462{ 463 struct sst_fw *sst_fw, *t; 464 465 mutex_lock(&dsp->mutex); 466 list_for_each_entry_safe(sst_fw, t, &dsp->fw_list, list) { 467 468 list_del(&sst_fw->list); 469 dma_free_coherent(dsp->dev, sst_fw->size, sst_fw->dma_buf, 470 sst_fw->dmable_fw_paddr); 471 kfree(sst_fw); 472 } 473 mutex_unlock(&dsp->mutex); 474} 475EXPORT_SYMBOL_GPL(sst_fw_free_all); 476 477/* create a new SST generic module from FW template */ 478struct sst_module *sst_module_new(struct sst_fw *sst_fw, 479 struct sst_module_template *template, void *private) 480{ 481 struct sst_dsp *dsp = sst_fw->dsp; 482 struct sst_module *sst_module; 483 484 sst_module = kzalloc(sizeof(*sst_module), GFP_KERNEL); 485 if (sst_module == NULL) 486 return NULL; 487 488 sst_module->id = template->id; 489 sst_module->dsp = dsp; 490 sst_module->sst_fw = sst_fw; 491 sst_module->scratch_size = template->scratch_size; 492 sst_module->persistent_size = template->persistent_size; 493 sst_module->entry = template->entry; 494 sst_module->state = SST_MODULE_STATE_UNLOADED; 495 496 INIT_LIST_HEAD(&sst_module->block_list); 497 INIT_LIST_HEAD(&sst_module->runtime_list); 498 499 mutex_lock(&dsp->mutex); 500 list_add(&sst_module->list, &dsp->module_list); 501 mutex_unlock(&dsp->mutex); 502 503 return sst_module; 504} 505EXPORT_SYMBOL_GPL(sst_module_new); 506 507/* free firmware module and remove from available list */ 508void sst_module_free(struct sst_module *sst_module) 509{ 510 struct sst_dsp *dsp = sst_module->dsp; 511 512 mutex_lock(&dsp->mutex); 513 list_del(&sst_module->list); 514 mutex_unlock(&dsp->mutex); 515 516 kfree(sst_module); 517} 518EXPORT_SYMBOL_GPL(sst_module_free); 519 520struct sst_module_runtime *sst_module_runtime_new(struct sst_module *module, 521 int id, void *private) 522{ 523 struct sst_dsp *dsp = module->dsp; 524 struct sst_module_runtime *runtime; 525 526 runtime = kzalloc(sizeof(*runtime), GFP_KERNEL); 527 if (runtime == NULL) 528 return NULL; 529 530 runtime->id = id; 531 runtime->dsp = dsp; 532 runtime->module = module; 533 INIT_LIST_HEAD(&runtime->block_list); 534 535 mutex_lock(&dsp->mutex); 536 list_add(&runtime->list, &module->runtime_list); 537 mutex_unlock(&dsp->mutex); 538 539 return runtime; 540} 541EXPORT_SYMBOL_GPL(sst_module_runtime_new); 542 543void sst_module_runtime_free(struct sst_module_runtime 
void sst_module_runtime_free(struct sst_module_runtime *runtime)
{
	struct sst_dsp *dsp = runtime->dsp;

	mutex_lock(&dsp->mutex);
	list_del(&runtime->list);
	mutex_unlock(&dsp->mutex);

	kfree(runtime);
}
EXPORT_SYMBOL_GPL(sst_module_runtime_free);

static struct sst_mem_block *find_block(struct sst_dsp *dsp,
	struct sst_block_allocator *ba)
{
	struct sst_mem_block *block;

	list_for_each_entry(block, &dsp->free_block_list, list) {
		if (block->type == ba->type && block->offset == ba->offset)
			return block;
	}

	return NULL;
}

/* Block allocator must be on block boundary */
static int block_alloc_contiguous(struct sst_dsp *dsp,
	struct sst_block_allocator *ba, struct list_head *block_list)
{
	struct list_head tmp = LIST_HEAD_INIT(tmp);
	struct sst_mem_block *block;
	u32 block_start = SST_HSW_BLOCK_ANY;
	int size = ba->size, offset = ba->offset;

	while (ba->size > 0) {

		block = find_block(dsp, ba);
		if (!block) {
			list_splice(&tmp, &dsp->free_block_list);

			ba->size = size;
			ba->offset = offset;
			return -ENOMEM;
		}

		list_move_tail(&block->list, &tmp);
		ba->offset += block->size;
		ba->size -= block->size;
	}
	ba->size = size;
	ba->offset = offset;

	list_for_each_entry(block, &tmp, list) {

		if (block->offset < block_start)
			block_start = block->offset;

		list_add(&block->module_list, block_list);

		dev_dbg(dsp->dev, "block allocated %d:%d at offset 0x%x\n",
			block->type, block->index, block->offset);
	}

	list_splice(&tmp, &dsp->used_block_list);
	return 0;
}

/* allocate first free DSP blocks for data - callers hold locks */
static int block_alloc(struct sst_dsp *dsp, struct sst_block_allocator *ba,
	struct list_head *block_list)
{
	struct sst_mem_block *block, *tmp;
	int ret = 0;

	if (ba->size == 0)
		return 0;

	/* find first free whole blocks that can hold module */
	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {

		/* ignore blocks with wrong type */
		if (block->type != ba->type)
			continue;

		if (ba->size > block->size)
			continue;

		ba->offset = block->offset;
		block->bytes_used = ba->size % block->size;
		list_add(&block->module_list, block_list);
		list_move(&block->list, &dsp->used_block_list);
		dev_dbg(dsp->dev, "block allocated %d:%d at offset 0x%x\n",
			block->type, block->index, block->offset);
		return 0;
	}

	/* then find free multiple blocks that can hold module */
	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {

		/* ignore blocks with wrong type */
		if (block->type != ba->type)
			continue;

		/* does the section span more than one block? */
		if (ba->size > block->size) {

			/* align ba to block boundary */
			ba->offset = block->offset;

			ret = block_alloc_contiguous(dsp, ba, block_list);
			if (ret == 0)
				return ret;

		}
	}

	/* not enough free block space */
	return -ENOMEM;
}
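/*
 * Worked example for block_alloc() above (sizes hypothetical): with free
 * 0x8000-byte blocks of the right type, a request of ba->size = 0x14000
 * fails the single-block scan, so block_alloc_contiguous() claims three
 * adjacent blocks (0x18000 bytes) starting at the first candidate's
 * offset, leaving the last block only partially used.
 */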
int sst_alloc_blocks(struct sst_dsp *dsp, struct sst_block_allocator *ba,
	struct list_head *block_list)
{
	int ret;

	dev_dbg(dsp->dev, "block request 0x%x bytes at offset 0x%x type %d\n",
		ba->size, ba->offset, ba->type);

	mutex_lock(&dsp->mutex);

	ret = block_alloc(dsp, ba, block_list);
	if (ret < 0) {
		dev_err(dsp->dev, "error: can't alloc blocks %d\n", ret);
		goto out;
	}

	/* prepare DSP blocks for module usage */
	ret = block_list_prepare(dsp, block_list);
	if (ret < 0)
		dev_err(dsp->dev, "error: prepare failed\n");

out:
	mutex_unlock(&dsp->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_alloc_blocks);

int sst_free_blocks(struct sst_dsp *dsp, struct list_head *block_list)
{
	mutex_lock(&dsp->mutex);
	block_list_remove(dsp, block_list);
	mutex_unlock(&dsp->mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(sst_free_blocks);

/* allocate memory blocks for static module addresses - callers hold locks */
static int block_alloc_fixed(struct sst_dsp *dsp, struct sst_block_allocator *ba,
	struct list_head *block_list)
{
	struct sst_mem_block *block, *tmp;
	struct sst_block_allocator ba_tmp = *ba;
	u32 end = ba->offset + ba->size, block_end;
	int err;

	/* only IRAM/DRAM blocks are managed */
	if (ba->type != SST_MEM_IRAM && ba->type != SST_MEM_DRAM)
		return 0;

	/* are blocks already attached to this module */
	list_for_each_entry_safe(block, tmp, block_list, module_list) {

		/* ignore blocks with wrong type */
		if (block->type != ba->type)
			continue;

		block_end = block->offset + block->size;

		/* find block that holds section */
		if (ba->offset >= block->offset && end <= block_end)
			return 0;

		/* does the section extend beyond this block? */
		if (ba->offset >= block->offset && ba->offset < block_end) {

			/* align ba to block boundary */
			ba_tmp.size -= block_end - ba->offset;
			ba_tmp.offset = block_end;
			err = block_alloc_contiguous(dsp, &ba_tmp, block_list);
			if (err < 0)
				return -ENOMEM;

			/* module already owns blocks */
			return 0;
		}
	}

	/* find first free blocks that can hold section in free list */
	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {
		block_end = block->offset + block->size;

		/* ignore blocks with wrong type */
		if (block->type != ba->type)
			continue;

		/* find block that holds section */
		if (ba->offset >= block->offset && end <= block_end) {

			/* add block */
			list_move(&block->list, &dsp->used_block_list);
			list_add(&block->module_list, block_list);
			dev_dbg(dsp->dev, "block allocated %d:%d at offset 0x%x\n",
				block->type, block->index, block->offset);
			return 0;
		}

		/* does the section extend beyond this block? */
		if (ba->offset >= block->offset && ba->offset < block_end) {

			/* add block */
			list_move(&block->list, &dsp->used_block_list);
			list_add(&block->module_list, block_list);
			/* align ba to block boundary */
			ba_tmp.size -= block_end - ba->offset;
			ba_tmp.offset = block_end;

			err = block_alloc_contiguous(dsp, &ba_tmp, block_list);
			if (err < 0)
				return -ENOMEM;

			return 0;
		}
	}

	return -ENOMEM;
}
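/*
 * Worked example for block_alloc_fixed() above (offsets hypothetical):
 * with 0x8000-byte blocks, a section at offset 0xa000 of size 0x8000 ends
 * at 0x12000. The free block covering 0x8000..0xffff is claimed first,
 * then ba_tmp is advanced to offset 0x10000 with the remaining 0x2000
 * bytes and block_alloc_contiguous() claims the following block.
 */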
/* Load fixed module data into DSP memory blocks */
int sst_module_alloc_blocks(struct sst_module *module)
{
	struct sst_dsp *dsp = module->dsp;
	struct sst_fw *sst_fw = module->sst_fw;
	struct sst_block_allocator ba;
	int ret;

	memset(&ba, 0, sizeof(ba));
	ba.size = module->size;
	ba.type = module->type;
	ba.offset = module->offset;

	dev_dbg(dsp->dev, "block request 0x%x bytes at offset 0x%x type %d\n",
		ba.size, ba.offset, ba.type);

	mutex_lock(&dsp->mutex);

	/* alloc blocks that includes this section */
	ret = block_alloc_fixed(dsp, &ba, &module->block_list);
	if (ret < 0) {
		dev_err(dsp->dev,
			"error: no free blocks for section at offset 0x%x size 0x%x\n",
			module->offset, module->size);
		mutex_unlock(&dsp->mutex);
		return -ENOMEM;
	}

	/* prepare DSP blocks for module copy */
	ret = block_list_prepare(dsp, &module->block_list);
	if (ret < 0) {
		dev_err(dsp->dev, "error: fw module prepare failed\n");
		goto err;
	}

	/* copy partial module data to blocks */
	if (dsp->fw_use_dma) {
		ret = sst_dsp_dma_copyto(dsp,
			dsp->addr.lpe_base + module->offset,
			sst_fw->dmable_fw_paddr + module->data_offset,
			module->size);
		if (ret < 0) {
			dev_err(dsp->dev, "error: module copy failed\n");
			goto err;
		}
	} else
		sst_memcpy32(dsp->addr.lpe + module->offset, module->data,
			module->size);

	mutex_unlock(&dsp->mutex);
	return ret;

err:
	block_list_remove(dsp, &module->block_list);
	mutex_unlock(&dsp->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_module_alloc_blocks);

/* Unload entire module from DSP memory */
int sst_module_free_blocks(struct sst_module *module)
{
	struct sst_dsp *dsp = module->dsp;

	mutex_lock(&dsp->mutex);
	block_list_remove(dsp, &module->block_list);
	mutex_unlock(&dsp->mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(sst_module_free_blocks);

int sst_module_runtime_alloc_blocks(struct sst_module_runtime *runtime,
	int offset)
{
	struct sst_dsp *dsp = runtime->dsp;
	struct sst_module *module = runtime->module;
	struct sst_block_allocator ba;
	int ret;

	if (module->persistent_size == 0)
		return 0;

	memset(&ba, 0, sizeof(ba));
	ba.size = module->persistent_size;
	ba.type = SST_MEM_DRAM;

	mutex_lock(&dsp->mutex);

	/* do we need to allocate at a fixed address ? */
	if (offset != 0) {

		ba.offset = offset;

		dev_dbg(dsp->dev, "persistent fixed block request 0x%x bytes type %d offset 0x%x\n",
			ba.size, ba.type, ba.offset);

		/* alloc blocks that includes this section */
		ret = block_alloc_fixed(dsp, &ba, &runtime->block_list);

	} else {
		dev_dbg(dsp->dev, "persistent block request 0x%x bytes type %d\n",
			ba.size, ba.type);

		/* alloc blocks that includes this section */
		ret = block_alloc(dsp, &ba, &runtime->block_list);
	}
	if (ret < 0) {
		dev_err(dsp->dev,
			"error: no free blocks for runtime module size 0x%x\n",
			module->persistent_size);
		mutex_unlock(&dsp->mutex);
		return -ENOMEM;
	}
	runtime->persistent_offset = ba.offset;

	/* prepare DSP blocks for module copy */
	ret = block_list_prepare(dsp, &runtime->block_list);
	if (ret < 0) {
		dev_err(dsp->dev, "error: runtime block prepare failed\n");
		goto err;
	}

	mutex_unlock(&dsp->mutex);
	return ret;

err:
	block_list_remove(dsp, &runtime->block_list);
	mutex_unlock(&dsp->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_alloc_blocks);

int sst_module_runtime_free_blocks(struct sst_module_runtime *runtime)
{
	struct sst_dsp *dsp = runtime->dsp;

	mutex_lock(&dsp->mutex);
	block_list_remove(dsp, &runtime->block_list);
	mutex_unlock(&dsp->mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_free_blocks);
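/*
 * Power management sketch: save() below allocates the DMA-able context
 * buffer and copies the runtime's persistent area out of the DSP;
 * restore() copies it back and frees the buffer (illustrative pairing):
 *
 *	suspend:	sst_module_runtime_save(runtime, &context);
 *	resume:		sst_module_runtime_restore(runtime, &context);
 */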
0x%x\n", 931 runtime->id, runtime->persistent_offset, 932 module->persistent_size); 933 934 context->buffer = dma_alloc_coherent(dsp->dma_dev, 935 module->persistent_size, 936 &context->dma_buffer, GFP_DMA | GFP_KERNEL); 937 if (!context->buffer) { 938 dev_err(dsp->dev, "error: DMA context alloc failed\n"); 939 return -ENOMEM; 940 } 941 942 mutex_lock(&dsp->mutex); 943 944 if (dsp->fw_use_dma) { 945 946 ret = sst_dsp_dma_get_channel(dsp, 0); 947 if (ret < 0) 948 goto err; 949 950 ret = sst_dsp_dma_copyfrom(dsp, context->dma_buffer, 951 dsp->addr.lpe_base + runtime->persistent_offset, 952 module->persistent_size); 953 sst_dsp_dma_put_channel(dsp); 954 if (ret < 0) { 955 dev_err(dsp->dev, "error: context copy failed\n"); 956 goto err; 957 } 958 } else 959 sst_memcpy32(context->buffer, dsp->addr.lpe + 960 runtime->persistent_offset, 961 module->persistent_size); 962 963err: 964 mutex_unlock(&dsp->mutex); 965 return ret; 966} 967EXPORT_SYMBOL_GPL(sst_module_runtime_save); 968 969int sst_module_runtime_restore(struct sst_module_runtime *runtime, 970 struct sst_module_runtime_context *context) 971{ 972 struct sst_dsp *dsp = runtime->dsp; 973 struct sst_module *module = runtime->module; 974 int ret = 0; 975 976 dev_dbg(dsp->dev, "restoring runtime %d memory at 0x%x size 0x%x\n", 977 runtime->id, runtime->persistent_offset, 978 module->persistent_size); 979 980 mutex_lock(&dsp->mutex); 981 982 if (!context->buffer) { 983 dev_info(dsp->dev, "no context buffer need to restore!\n"); 984 goto err; 985 } 986 987 if (dsp->fw_use_dma) { 988 989 ret = sst_dsp_dma_get_channel(dsp, 0); 990 if (ret < 0) 991 goto err; 992 993 ret = sst_dsp_dma_copyto(dsp, 994 dsp->addr.lpe_base + runtime->persistent_offset, 995 context->dma_buffer, module->persistent_size); 996 sst_dsp_dma_put_channel(dsp); 997 if (ret < 0) { 998 dev_err(dsp->dev, "error: module copy failed\n"); 999 goto err; 1000 } 1001 } else 1002 sst_memcpy32(dsp->addr.lpe + runtime->persistent_offset, 1003 context->buffer, module->persistent_size); 1004 1005 dma_free_coherent(dsp->dma_dev, module->persistent_size, 1006 context->buffer, context->dma_buffer); 1007 context->buffer = NULL; 1008 1009err: 1010 mutex_unlock(&dsp->mutex); 1011 return ret; 1012} 1013EXPORT_SYMBOL_GPL(sst_module_runtime_restore); 1014 1015/* register a DSP memory block for use with FW based modules */ 1016struct sst_mem_block *sst_mem_block_register(struct sst_dsp *dsp, u32 offset, 1017 u32 size, enum sst_mem_type type, struct sst_block_ops *ops, u32 index, 1018 void *private) 1019{ 1020 struct sst_mem_block *block; 1021 1022 block = kzalloc(sizeof(*block), GFP_KERNEL); 1023 if (block == NULL) 1024 return NULL; 1025 1026 block->offset = offset; 1027 block->size = size; 1028 block->index = index; 1029 block->type = type; 1030 block->dsp = dsp; 1031 block->private = private; 1032 block->ops = ops; 1033 1034 mutex_lock(&dsp->mutex); 1035 list_add(&block->list, &dsp->free_block_list); 1036 mutex_unlock(&dsp->mutex); 1037 1038 return block; 1039} 1040EXPORT_SYMBOL_GPL(sst_mem_block_register); 1041 1042/* unregister all DSP memory blocks */ 1043void sst_mem_block_unregister_all(struct sst_dsp *dsp) 1044{ 1045 struct sst_mem_block *block, *tmp; 1046 1047 mutex_lock(&dsp->mutex); 1048 1049 /* unregister used blocks */ 1050 list_for_each_entry_safe(block, tmp, &dsp->used_block_list, list) { 1051 list_del(&block->list); 1052 kfree(block); 1053 } 1054 1055 /* unregister free blocks */ 1056 list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) { 1057 list_del(&block->list); 1058 
/* unregister all DSP memory blocks */
void sst_mem_block_unregister_all(struct sst_dsp *dsp)
{
	struct sst_mem_block *block, *tmp;

	mutex_lock(&dsp->mutex);

	/* unregister used blocks */
	list_for_each_entry_safe(block, tmp, &dsp->used_block_list, list) {
		list_del(&block->list);
		kfree(block);
	}

	/* unregister free blocks */
	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {
		list_del(&block->list);
		kfree(block);
	}

	mutex_unlock(&dsp->mutex);
}
EXPORT_SYMBOL_GPL(sst_mem_block_unregister_all);

/* allocate scratch buffer blocks */
int sst_block_alloc_scratch(struct sst_dsp *dsp)
{
	struct sst_module *module;
	struct sst_block_allocator ba;
	int ret;

	mutex_lock(&dsp->mutex);

	/* calculate required scratch size */
	dsp->scratch_size = 0;
	list_for_each_entry(module, &dsp->module_list, list) {
		dev_dbg(dsp->dev, "module %d scratch req 0x%x bytes\n",
			module->id, module->scratch_size);
		if (dsp->scratch_size < module->scratch_size)
			dsp->scratch_size = module->scratch_size;
	}

	dev_dbg(dsp->dev, "scratch buffer required is 0x%x bytes\n",
		dsp->scratch_size);

	if (dsp->scratch_size == 0) {
		dev_info(dsp->dev, "no modules need scratch buffer\n");
		mutex_unlock(&dsp->mutex);
		return 0;
	}

	/* allocate blocks for module scratch buffers */
	dev_dbg(dsp->dev, "allocating scratch blocks\n");

	ba.size = dsp->scratch_size;
	ba.type = SST_MEM_DRAM;

	/* do we need to allocate at fixed offset */
	if (dsp->scratch_offset != 0) {

		ba.offset = dsp->scratch_offset;

		dev_dbg(dsp->dev, "block request 0x%x bytes type %d at 0x%x\n",
			ba.size, ba.type, ba.offset);

		/* alloc blocks that includes this section */
		ret = block_alloc_fixed(dsp, &ba, &dsp->scratch_block_list);

	} else {
		dev_dbg(dsp->dev, "block request 0x%x bytes type %d\n",
			ba.size, ba.type);

		ba.offset = 0;
		ret = block_alloc(dsp, &ba, &dsp->scratch_block_list);
	}
	if (ret < 0) {
		dev_err(dsp->dev, "error: can't alloc scratch blocks\n");
		mutex_unlock(&dsp->mutex);
		return ret;
	}

	ret = block_list_prepare(dsp, &dsp->scratch_block_list);
	if (ret < 0) {
		dev_err(dsp->dev, "error: scratch block prepare failed\n");
		mutex_unlock(&dsp->mutex);
		return ret;
	}

	/* assign the same offset of scratch to each module */
	dsp->scratch_offset = ba.offset;
	mutex_unlock(&dsp->mutex);
	return dsp->scratch_size;
}
EXPORT_SYMBOL_GPL(sst_block_alloc_scratch);

/* free all scratch blocks */
void sst_block_free_scratch(struct sst_dsp *dsp)
{
	mutex_lock(&dsp->mutex);
	block_list_remove(dsp, &dsp->scratch_block_list);
	mutex_unlock(&dsp->mutex);
}
EXPORT_SYMBOL_GPL(sst_block_free_scratch);

/* get a module from its unique ID */
struct sst_module *sst_module_get_from_id(struct sst_dsp *dsp, u32 id)
{
	struct sst_module *module;

	mutex_lock(&dsp->mutex);

	list_for_each_entry(module, &dsp->module_list, list) {
		if (module->id == id) {
			mutex_unlock(&dsp->mutex);
			return module;
		}
	}

	mutex_unlock(&dsp->mutex);
	return NULL;
}
EXPORT_SYMBOL_GPL(sst_module_get_from_id);

struct sst_module_runtime *sst_module_runtime_get_from_id(
	struct sst_module *module, u32 id)
{
	struct sst_module_runtime *runtime;
	struct sst_dsp *dsp = module->dsp;

	mutex_lock(&dsp->mutex);

	list_for_each_entry(runtime, &module->runtime_list, list) {
		if (runtime->id == id) {
			mutex_unlock(&dsp->mutex);
			return runtime;
		}
	}

	mutex_unlock(&dsp->mutex);
	return NULL;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_get_from_id);
/* returns block address in DSP address space */
u32 sst_dsp_get_offset(struct sst_dsp *dsp, u32 offset,
	enum sst_mem_type type)
{
	switch (type) {
	case SST_MEM_IRAM:
		return offset - dsp->addr.iram_offset +
			dsp->addr.dsp_iram_offset;
	case SST_MEM_DRAM:
		return offset - dsp->addr.dram_offset +
			dsp->addr.dsp_dram_offset;
	default:
		return 0;
	}
}
EXPORT_SYMBOL_GPL(sst_dsp_get_offset);
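/*
 * Worked example for sst_dsp_get_offset() (values hypothetical): with
 * addr.iram_offset = 0x80000 and addr.dsp_iram_offset = 0x0, an IRAM
 * offset of 0x84000 maps to 0x84000 - 0x80000 + 0x0 = 0x4000 in the
 * DSP's own address space.
 */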