This source file includes the following definitions:
- cnl_prepare_fw
- sst_transfer_fw_host_dma
- cnl_load_base_firmware
- cnl_set_dsp_D0
- cnl_set_dsp_D3
- cnl_get_errno
- cnl_dsp_irq_thread_handler
- cnl_ipc_tx_msg
- cnl_ipc_is_dsp_busy
- cnl_ipc_init
- cnl_sst_dsp_init
- cnl_sst_init_fw
- cnl_sst_dsp_cleanup

#include <linux/module.h>
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/device.h>

#include "../common/sst-dsp.h"
#include "../common/sst-dsp-priv.h"
#include "../common/sst-ipc.h"
#include "cnl-sst-dsp.h"
#include "skl.h"

#define CNL_FW_ROM_INIT		0x1
#define CNL_FW_INIT		0x5
#define CNL_IPC_PURGE		0x01004000
#define CNL_INIT_TIMEOUT	300
#define CNL_BASEFW_TIMEOUT	3000

#define CNL_ADSP_SRAM0_BASE	0x80000

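/* firmware status and error code are reported through SRAM window 0 */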
#define CNL_ADSP_FW_STATUS	CNL_ADSP_SRAM0_BASE
#define CNL_ADSP_ERROR_CODE	(CNL_ADSP_FW_STATUS + 0x4)

#define CNL_INSTANCE_ID		0
#define CNL_BASE_FW_MODULE_ID	0
#define CNL_ADSP_FW_HDR_OFFSET	0x2000
#define CNL_ROM_CTRL_DMA_ID	0x9

static int cnl_prepare_fw(struct sst_dsp *ctx, const void *fwdata, u32 fwsize)
{
        int ret, stream_tag;

        stream_tag = ctx->dsp_ops.prepare(ctx->dev, 0x40, fwsize, &ctx->dmab);
        if (stream_tag <= 0) {
                dev_err(ctx->dev, "dma prepare failed: %#x\n", stream_tag);
                return stream_tag;
        }

        ctx->dsp_ops.stream_tag = stream_tag;
        memcpy(ctx->dmab.area, fwdata, fwsize);

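        /* purge FW request, encoding the host DMA stream tag for the ROM */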
        sst_dsp_shim_write(ctx, CNL_ADSP_REG_HIPCIDR,
                           CNL_ADSP_REG_HIPCIDR_BUSY | (CNL_IPC_PURGE |
                           ((stream_tag - 1) << CNL_ROM_CTRL_DMA_ID)));

        ret = cnl_dsp_enable_core(ctx, SKL_DSP_CORE0_MASK);
        if (ret < 0) {
                dev_err(ctx->dev, "dsp boot core failed ret: %d\n", ret);
                ret = -EIO;
                goto base_fw_load_failed;
        }

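        /* enable IPC and operation interrupts before polling for ROM init */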
        cnl_ipc_int_enable(ctx);
        cnl_ipc_op_int_enable(ctx);

        ret = sst_dsp_register_poll(ctx, CNL_ADSP_FW_STATUS, CNL_FW_STS_MASK,
                                    CNL_FW_ROM_INIT, CNL_INIT_TIMEOUT,
                                    "rom load");
        if (ret < 0) {
                dev_err(ctx->dev, "rom init timeout, ret: %d\n", ret);
                goto base_fw_load_failed;
        }

        return 0;

base_fw_load_failed:
        ctx->dsp_ops.cleanup(ctx->dev, &ctx->dmab, stream_tag);
        cnl_dsp_disable_core(ctx, SKL_DSP_CORE0_MASK);

        return ret;
}

static int sst_transfer_fw_host_dma(struct sst_dsp *ctx)
{
        int ret;

        ctx->dsp_ops.trigger(ctx->dev, true, ctx->dsp_ops.stream_tag);
        ret = sst_dsp_register_poll(ctx, CNL_ADSP_FW_STATUS, CNL_FW_STS_MASK,
                                    CNL_FW_INIT, CNL_BASEFW_TIMEOUT,
                                    "firmware boot");

        ctx->dsp_ops.trigger(ctx->dev, false, ctx->dsp_ops.stream_tag);
        ctx->dsp_ops.cleanup(ctx->dev, &ctx->dmab, ctx->dsp_ops.stream_tag);

        return ret;
}

static int cnl_load_base_firmware(struct sst_dsp *ctx)
{
        struct firmware stripped_fw;
        struct skl_dev *cnl = ctx->thread_context;
        int ret;

        if (!ctx->fw) {
                ret = request_firmware(&ctx->fw, ctx->fw_name, ctx->dev);
                if (ret < 0) {
                        dev_err(ctx->dev, "request firmware failed: %d\n", ret);
                        goto cnl_load_base_firmware_failed;
                }
        }

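        /* parse module UUIDs on the first boot only */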
        if (cnl->is_first_boot) {
                ret = snd_skl_parse_uuids(ctx, ctx->fw,
                                          CNL_ADSP_FW_HDR_OFFSET, 0);
                if (ret < 0)
                        goto cnl_load_base_firmware_failed;
        }

        stripped_fw.data = ctx->fw->data;
        stripped_fw.size = ctx->fw->size;
        skl_dsp_strip_extended_manifest(&stripped_fw);

        ret = cnl_prepare_fw(ctx, stripped_fw.data, stripped_fw.size);
        if (ret < 0) {
                dev_err(ctx->dev, "prepare firmware failed: %d\n", ret);
                goto cnl_load_base_firmware_failed;
        }

        ret = sst_transfer_fw_host_dma(ctx);
        if (ret < 0) {
                dev_err(ctx->dev, "transfer firmware failed: %d\n", ret);
                cnl_dsp_disable_core(ctx, SKL_DSP_CORE0_MASK);
                goto cnl_load_base_firmware_failed;
        }

        ret = wait_event_timeout(cnl->boot_wait, cnl->boot_complete,
                                 msecs_to_jiffies(SKL_IPC_BOOT_MSECS));
        if (ret == 0) {
                dev_err(ctx->dev, "FW ready timed out\n");
                cnl_dsp_disable_core(ctx, SKL_DSP_CORE0_MASK);
                ret = -EIO;
                goto cnl_load_base_firmware_failed;
        }

        cnl->fw_loaded = true;

        return 0;

cnl_load_base_firmware_failed:
        release_firmware(ctx->fw);
        ctx->fw = NULL;

        return ret;
}

static int cnl_set_dsp_D0(struct sst_dsp *ctx, unsigned int core_id)
{
        struct skl_dev *cnl = ctx->thread_context;
        unsigned int core_mask = SKL_DSP_CORE_MASK(core_id);
        struct skl_ipc_dxstate_info dx;
        int ret;

        if (!cnl->fw_loaded) {
                cnl->boot_complete = false;
                ret = cnl_load_base_firmware(ctx);
                if (ret < 0) {
                        dev_err(ctx->dev, "fw reload failed: %d\n", ret);
                        return ret;
                }

                cnl->cores.state[core_id] = SKL_DSP_RUNNING;
                return ret;
        }

        ret = cnl_dsp_enable_core(ctx, core_mask);
        if (ret < 0) {
                dev_err(ctx->dev, "enable dsp core %d failed: %d\n",
                        core_id, ret);
                goto err;
        }

        if (core_id == SKL_DSP_CORE0_ID) {
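                /* enable interrupts and wait for the FW ready notification */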
                cnl_ipc_int_enable(ctx);
                cnl_ipc_op_int_enable(ctx);
                cnl->boot_complete = false;

                ret = wait_event_timeout(cnl->boot_wait, cnl->boot_complete,
                                         msecs_to_jiffies(SKL_IPC_BOOT_MSECS));
                if (ret == 0) {
                        dev_err(ctx->dev,
                                "dsp boot timeout, status=%#x error=%#x\n",
                                sst_dsp_shim_read(ctx, CNL_ADSP_FW_STATUS),
                                sst_dsp_shim_read(ctx, CNL_ADSP_ERROR_CODE));
                        ret = -ETIMEDOUT;
                        goto err;
                }
        } else {
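                /* power up additional cores with a set_dx IPC to the base FW */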
                dx.core_mask = core_mask;
                dx.dx_mask = core_mask;

                ret = skl_ipc_set_dx(&cnl->ipc, CNL_INSTANCE_ID,
                                     CNL_BASE_FW_MODULE_ID, &dx);
                if (ret < 0) {
                        dev_err(ctx->dev, "set_dx failed, core: %d ret: %d\n",
                                core_id, ret);
                        goto err;
                }
        }
        cnl->cores.state[core_id] = SKL_DSP_RUNNING;

        return 0;
err:
        cnl_dsp_disable_core(ctx, core_mask);

        return ret;
}

static int cnl_set_dsp_D3(struct sst_dsp *ctx, unsigned int core_id)
{
        struct skl_dev *cnl = ctx->thread_context;
        unsigned int core_mask = SKL_DSP_CORE_MASK(core_id);
        struct skl_ipc_dxstate_info dx;
        int ret;

        dx.core_mask = core_mask;
        dx.dx_mask = SKL_IPC_D3_MASK;

        ret = skl_ipc_set_dx(&cnl->ipc, CNL_INSTANCE_ID,
                             CNL_BASE_FW_MODULE_ID, &dx);
        if (ret < 0) {
                dev_err(ctx->dev,
                        "dsp core %d to d3 failed; continue reset\n",
                        core_id);
                cnl->fw_loaded = false;
        }

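        /* disable interrupts if core 0 */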
        if (core_id == SKL_DSP_CORE0_ID) {
                skl_ipc_op_int_disable(ctx);
                skl_ipc_int_disable(ctx);
        }

        ret = cnl_dsp_disable_core(ctx, core_mask);
        if (ret < 0) {
                dev_err(ctx->dev, "disable dsp core %d failed: %d\n",
                        core_id, ret);
                return ret;
        }

        cnl->cores.state[core_id] = SKL_DSP_RESET;

        return ret;
}

static unsigned int cnl_get_errno(struct sst_dsp *ctx)
{
        return sst_dsp_shim_read(ctx, CNL_ADSP_ERROR_CODE);
}

static const struct skl_dsp_fw_ops cnl_fw_ops = {
        .set_state_D0 = cnl_set_dsp_D0,
        .set_state_D3 = cnl_set_dsp_D3,
        .load_fw = cnl_load_base_firmware,
        .get_fw_errcode = cnl_get_errno,
};

static struct sst_ops cnl_ops = {
        .irq_handler = cnl_dsp_sst_interrupt,
        .write = sst_shim32_write,
        .read = sst_shim32_read,
        .ram_read = sst_memcpy_fromio_32,
        .ram_write = sst_memcpy_toio_32,
        .free = cnl_dsp_free,
};

#define CNL_IPC_GLB_NOTIFY_RSP_SHIFT	29
#define CNL_IPC_GLB_NOTIFY_RSP_MASK	0x1
#define CNL_IPC_GLB_NOTIFY_RSP_TYPE(x)	(((x) >> CNL_IPC_GLB_NOTIFY_RSP_SHIFT) \
					 & CNL_IPC_GLB_NOTIFY_RSP_MASK)

static irqreturn_t cnl_dsp_irq_thread_handler(int irq, void *context)
{
        struct sst_dsp *dsp = context;
        struct skl_dev *cnl = sst_dsp_get_thread_context(dsp);
        struct sst_generic_ipc *ipc = &cnl->ipc;
        struct skl_ipc_header header = {0};
        u32 hipcida, hipctdr, hipctdd;
        int ipc_irq = 0;

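        /* here we handle only IPC interrupts */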
        if (!(dsp->intr_status & CNL_ADSPIS_IPC))
                return IRQ_NONE;

        hipcida = sst_dsp_shim_read_unlocked(dsp, CNL_ADSP_REG_HIPCIDA);
        hipctdr = sst_dsp_shim_read_unlocked(dsp, CNL_ADSP_REG_HIPCTDR);
        hipctdd = sst_dsp_shim_read_unlocked(dsp, CNL_ADSP_REG_HIPCTDD);

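        /* reply message from the DSP */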
        if (hipcida & CNL_ADSP_REG_HIPCIDA_DONE) {
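                /* mask the done interrupt */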
                sst_dsp_shim_update_bits(dsp, CNL_ADSP_REG_HIPCCTL,
                                         CNL_ADSP_REG_HIPCCTL_DONE, 0);

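                /* clear the done bit - tell the DSP the operation is complete */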
                sst_dsp_shim_update_bits_forced(dsp, CNL_ADSP_REG_HIPCIDA,
                        CNL_ADSP_REG_HIPCIDA_DONE, CNL_ADSP_REG_HIPCIDA_DONE);

                ipc_irq = 1;

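                /* unmask the done interrupt */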
                sst_dsp_shim_update_bits(dsp, CNL_ADSP_REG_HIPCCTL,
                                         CNL_ADSP_REG_HIPCCTL_DONE,
                                         CNL_ADSP_REG_HIPCCTL_DONE);
        }

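        /* new message from the DSP */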
        if (hipctdr & CNL_ADSP_REG_HIPCTDR_BUSY) {
                header.primary = hipctdr;
                header.extension = hipctdd;
                dev_dbg(dsp->dev, "IPC irq: Firmware response primary:%x\n",
                        header.primary);
                dev_dbg(dsp->dev, "IPC irq: Firmware response extension:%x\n",
                        header.extension);

                if (CNL_IPC_GLB_NOTIFY_RSP_TYPE(header.primary)) {
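                        /* handle an immediate reply from the DSP core */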
                        skl_ipc_process_reply(ipc, header);
                } else {
                        dev_dbg(dsp->dev, "IPC irq: Notification from firmware\n");
                        skl_ipc_process_notification(ipc, header);
                }

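                /* clear the busy interrupt */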
                sst_dsp_shim_update_bits_forced(dsp, CNL_ADSP_REG_HIPCTDR,
                        CNL_ADSP_REG_HIPCTDR_BUSY, CNL_ADSP_REG_HIPCTDR_BUSY);

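                /* set the done bit to ack the DSP */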
                sst_dsp_shim_update_bits_forced(dsp, CNL_ADSP_REG_HIPCTDA,
                        CNL_ADSP_REG_HIPCTDA_DONE, CNL_ADSP_REG_HIPCTDA_DONE);
                ipc_irq = 1;
        }

        if (ipc_irq == 0)
                return IRQ_NONE;

        cnl_ipc_int_enable(dsp);

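        /* continue to send any remaining messages */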
        schedule_work(&ipc->kwork);

        return IRQ_HANDLED;
}

static struct sst_dsp_device cnl_dev = {
        .thread = cnl_dsp_irq_thread_handler,
        .ops = &cnl_ops,
};

static void cnl_ipc_tx_msg(struct sst_generic_ipc *ipc, struct ipc_message *msg)
{
        struct skl_ipc_header *header = (struct skl_ipc_header *)(&msg->tx.header);

        if (msg->tx.size)
                sst_dsp_outbox_write(ipc->dsp, msg->tx.data, msg->tx.size);
        sst_dsp_shim_write_unlocked(ipc->dsp, CNL_ADSP_REG_HIPCIDD,
                                    header->extension);
        sst_dsp_shim_write_unlocked(ipc->dsp, CNL_ADSP_REG_HIPCIDR,
                                    header->primary | CNL_ADSP_REG_HIPCIDR_BUSY);
}

static bool cnl_ipc_is_dsp_busy(struct sst_dsp *dsp)
{
        u32 hipcidr;

        hipcidr = sst_dsp_shim_read_unlocked(dsp, CNL_ADSP_REG_HIPCIDR);

        return (hipcidr & CNL_ADSP_REG_HIPCIDR_BUSY);
}

static int cnl_ipc_init(struct device *dev, struct skl_dev *cnl)
{
        struct sst_generic_ipc *ipc;
        int err;

        ipc = &cnl->ipc;
        ipc->dsp = cnl->dsp;
        ipc->dev = dev;

        ipc->tx_data_max_size = CNL_ADSP_W1_SZ;
        ipc->rx_data_max_size = CNL_ADSP_W0_UP_SZ;

        err = sst_ipc_init(ipc);
        if (err)
                return err;

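        /*
         * Override tx_msg and is_dsp_busy: the IPC registers on CNL
         * differ from the common SKL implementation.
         */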
        ipc->ops.tx_msg = cnl_ipc_tx_msg;
        ipc->ops.tx_data_copy = skl_ipc_tx_data_copy;
        ipc->ops.is_dsp_busy = cnl_ipc_is_dsp_busy;

        return 0;
}

int cnl_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq,
                     const char *fw_name, struct skl_dsp_loader_ops dsp_ops,
                     struct skl_dev **dsp)
{
        struct skl_dev *cnl;
        struct sst_dsp *sst;
        int ret;

        ret = skl_sst_ctx_init(dev, irq, fw_name, dsp_ops, dsp, &cnl_dev);
        if (ret < 0) {
                dev_err(dev, "%s: no device\n", __func__);
                return ret;
        }

        cnl = *dsp;
        sst = cnl->dsp;
        sst->fw_ops = cnl_fw_ops;
        sst->addr.lpe = mmio_base;
        sst->addr.shim = mmio_base;
        sst->addr.sram0_base = CNL_ADSP_SRAM0_BASE;
        sst->addr.sram1_base = CNL_ADSP_SRAM1_BASE;
        sst->addr.w0_stat_sz = CNL_ADSP_W0_STAT_SZ;
        sst->addr.w0_up_sz = CNL_ADSP_W0_UP_SZ;

        sst_dsp_mailbox_init(sst, (CNL_ADSP_SRAM0_BASE + CNL_ADSP_W0_STAT_SZ),
                             CNL_ADSP_W0_UP_SZ, CNL_ADSP_SRAM1_BASE,
                             CNL_ADSP_W1_SZ);

        ret = cnl_ipc_init(dev, cnl);
        if (ret) {
                skl_dsp_free(sst);
                return ret;
        }

        cnl->boot_complete = false;
        init_waitqueue_head(&cnl->boot_wait);

        return skl_dsp_acquire_irq(sst);
}
EXPORT_SYMBOL_GPL(cnl_sst_dsp_init);

int cnl_sst_init_fw(struct device *dev, struct skl_dev *skl)
{
        int ret;
        struct sst_dsp *sst = skl->dsp;

        ret = skl->dsp->fw_ops.load_fw(sst);
        if (ret < 0) {
                dev_err(dev, "load base fw failed: %d\n", ret);
                return ret;
        }

        skl_dsp_init_core_state(sst);

        skl->is_first_boot = false;

        return 0;
}
EXPORT_SYMBOL_GPL(cnl_sst_init_fw);

void cnl_sst_dsp_cleanup(struct device *dev, struct skl_dev *skl)
{
        if (skl->dsp->fw)
                release_firmware(skl->dsp->fw);

        skl_freeup_uuid_list(skl);
        cnl_ipc_free(&skl->ipc);

        skl->dsp->ops->free(skl->dsp);
}
EXPORT_SYMBOL_GPL(cnl_sst_dsp_cleanup);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Intel Cannonlake IPC driver");