This source file includes the following definitions:
- lx_dsp_register
- lx_dsp_reg_read
- lx_dsp_reg_readbuf
- lx_dsp_reg_write
- lx_dsp_reg_writebuf
- lx_plx_register
- lx_plx_reg_read
- lx_plx_reg_write
- lx_message_init
- lx_message_dump
- lx_message_dump
- lx_message_send_atomic
- lx_dsp_get_version
- lx_dsp_get_clock_frequency
- lx_dsp_get_mac
- lx_dsp_set_granularity
- lx_dsp_read_async_events
- lx_pipe_allocate
- lx_pipe_release
- lx_buffer_ask
- lx_pipe_stop
- lx_pipe_toggle_state
- lx_pipe_start
- lx_pipe_pause
- lx_pipe_sample_count
- lx_pipe_state
- lx_pipe_wait_for_state
- lx_pipe_wait_for_start
- lx_pipe_wait_for_idle
- lx_stream_set_state
- lx_stream_set_format
- lx_stream_state
- lx_stream_sample_position
- lx_buffer_give
- lx_buffer_free
- lx_buffer_cancel
- lx_level_unmute
- lx_level_peaks
- lx_interrupt_test_ack
- lx_interrupt_ack
- lx_interrupt_handle_async_events
- lx_interrupt_request_new_buffer
- lx_interrupt
- lx_threaded_irq
- lx_irq_set
- lx_irq_enable
- lx_irq_disable

#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>

#include "lx6464es.h"
#include "lx_core.h"

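/*
 * Low-level DSP register access: every port index used by the driver maps to
 * an offset in this table, and lx_dsp_register() turns it into a 32-bit MMIO
 * address within the DSP BAR (offsets are in 32-bit words, hence the *4).
 */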
static const unsigned long dsp_port_offsets[] = {
        0,
        0x400,
        0x401,
        0x402,
        0x403,
        0x404,
        0x405,
        0x406,
        0x407,
        0x408,
        0x409,
        0x40a,
        0x40b,
        0x40c,

        0x410,
        0x411,
        0x412,
        0x413,
        0x414,
        0x415,
        0x416,

        0x420,
        0x430,
        0x431,
        0x432,
        0x433,
        0x434,
        0x440
};

static void __iomem *lx_dsp_register(struct lx6464es *chip, int port)
{
        void __iomem *base_address = chip->port_dsp_bar;
        return base_address + dsp_port_offsets[port]*4;
}

unsigned long lx_dsp_reg_read(struct lx6464es *chip, int port)
{
        void __iomem *address = lx_dsp_register(chip, port);
        return ioread32(address);
}

static void lx_dsp_reg_readbuf(struct lx6464es *chip, int port, u32 *data,
                               u32 len)
{
        u32 __iomem *address = lx_dsp_register(chip, port);
        int i;

        for (i = 0; i != len; ++i)
                data[i] = ioread32(address + i);
}

void lx_dsp_reg_write(struct lx6464es *chip, int port, unsigned data)
{
        void __iomem *address = lx_dsp_register(chip, port);
        iowrite32(data, address);
}

static void lx_dsp_reg_writebuf(struct lx6464es *chip, int port,
                                const u32 *data, u32 len)
{
        u32 __iomem *address = lx_dsp_register(chip, port);
        int i;

        for (i = 0; i != len; ++i)
                iowrite32(data[i], address + i);
}

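/*
 * PLX (PCI bridge) local register offsets, relative to the remapped PLX BAR;
 * accessed through lx_plx_register() below.
 */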
static const unsigned long plx_port_offsets[] = {
        0x04,
        0x40,
        0x44,
        0x48,
        0x4c,
        0x50,
        0x54,
        0x58,
        0x5c,
        0x64,
        0x68,
        0x6C
};

static void __iomem *lx_plx_register(struct lx6464es *chip, int port)
{
        void __iomem *base_address = chip->port_plx_remapped;
        return base_address + plx_port_offsets[port];
}

unsigned long lx_plx_reg_read(struct lx6464es *chip, int port)
{
        void __iomem *address = lx_plx_register(chip, port);
        return ioread32(address);
}

void lx_plx_reg_write(struct lx6464es *chip, int port, u32 data)
{
        void __iomem *address = lx_plx_register(chip, port);
        iowrite32(data, address);
}

#ifdef CONFIG_SND_DEBUG
#define CMD_NAME(a) a
#else
#define CMD_NAME(a) NULL
#endif

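/*
 * Host/DSP mailbox handshake bits in eReg_CSM: the host sets MC after writing
 * a command to the CRM registers, and the DSP sets MR once its response is
 * ready (see lx_message_send_atomic() below).
 */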
#define Reg_CSM_MR 0x00000002
#define Reg_CSM_MC 0x00000001

struct dsp_cmd_info {
        u32 dcCodeOp;

        u16 dcCmdLength;
        u16 dcStatusType;

        u16 dcStatusLength;
        char *dcOpName;
};

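/*
 * Per-command descriptor for the DSP mailbox commands below:
 *   dcCodeOp       - opcode, pre-shifted to OPCODE_OFFSET in command word 0
 *   dcCmdLength    - command length in 32-bit words written to eReg_CRM1..
 *   dcStatusType   - when 0, lx_message_send_atomic() reads a completion code
 *                    back from eReg_CRM1; otherwise the command is assumed to
 *                    have succeeded once the DSP acknowledges it
 *   dcStatusLength - number of status words read back from eReg_CRM2..
 *   dcOpName       - command name, kept only when CONFIG_SND_DEBUG is set
 */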
static struct dsp_cmd_info dsp_commands[] =
{
        { (CMD_00_INFO_DEBUG << OPCODE_OFFSET) , 1
          , 1 , 0 , CMD_NAME("INFO_DEBUG") },
        { (CMD_01_GET_SYS_CFG << OPCODE_OFFSET) , 1
          , 1 , 2 , CMD_NAME("GET_SYS_CFG") },
        { (CMD_02_SET_GRANULARITY << OPCODE_OFFSET) , 1
          , 1 , 0 , CMD_NAME("SET_GRANULARITY") },
        { (CMD_03_SET_TIMER_IRQ << OPCODE_OFFSET) , 1
          , 1 , 0 , CMD_NAME("SET_TIMER_IRQ") },
        { (CMD_04_GET_EVENT << OPCODE_OFFSET) , 1
          , 1 , 0 , CMD_NAME("GET_EVENT") },
        { (CMD_05_GET_PIPES << OPCODE_OFFSET) , 1
          , 1 , 2 , CMD_NAME("GET_PIPES") },
        { (CMD_06_ALLOCATE_PIPE << OPCODE_OFFSET) , 1
          , 0 , 0 , CMD_NAME("ALLOCATE_PIPE") },
        { (CMD_07_RELEASE_PIPE << OPCODE_OFFSET) , 1
          , 0 , 0 , CMD_NAME("RELEASE_PIPE") },
        { (CMD_08_ASK_BUFFERS << OPCODE_OFFSET) , 1
          , 1 , MAX_STREAM_BUFFER , CMD_NAME("ASK_BUFFERS") },
        { (CMD_09_STOP_PIPE << OPCODE_OFFSET) , 1
          , 0 , 0 , CMD_NAME("STOP_PIPE") },
        { (CMD_0A_GET_PIPE_SPL_COUNT << OPCODE_OFFSET) , 1
          , 1 , 1 , CMD_NAME("GET_PIPE_SPL_COUNT") },
        { (CMD_0B_TOGGLE_PIPE_STATE << OPCODE_OFFSET) , 1
          , 1 , 0 , CMD_NAME("TOGGLE_PIPE_STATE") },
        { (CMD_0C_DEF_STREAM << OPCODE_OFFSET) , 1
          , 1 , 0 , CMD_NAME("DEF_STREAM") },
        { (CMD_0D_SET_MUTE << OPCODE_OFFSET) , 3
          , 1 , 0 , CMD_NAME("SET_MUTE") },
        { (CMD_0E_GET_STREAM_SPL_COUNT << OPCODE_OFFSET) , 1
          , 1 , 2 , CMD_NAME("GET_STREAM_SPL_COUNT") },
        { (CMD_0F_UPDATE_BUFFER << OPCODE_OFFSET) , 3
          , 0 , 1 , CMD_NAME("UPDATE_BUFFER") },
        { (CMD_10_GET_BUFFER << OPCODE_OFFSET) , 1
          , 1 , 4 , CMD_NAME("GET_BUFFER") },
        { (CMD_11_CANCEL_BUFFER << OPCODE_OFFSET) , 1
          , 1 , 1 , CMD_NAME("CANCEL_BUFFER") },
        { (CMD_12_GET_PEAK << OPCODE_OFFSET) , 1
          , 1 , 1 , CMD_NAME("GET_PEAK") },
        { (CMD_13_SET_STREAM_STATE << OPCODE_OFFSET) , 1
          , 1 , 0 , CMD_NAME("SET_STREAM_STATE") },
};

static void lx_message_init(struct lx_rmh *rmh, enum cmd_mb_opcodes cmd)
{
        snd_BUG_ON(cmd >= CMD_14_INVALID);

        rmh->cmd[0] = dsp_commands[cmd].dcCodeOp;
        rmh->cmd_len = dsp_commands[cmd].dcCmdLength;
        rmh->stat_len = dsp_commands[cmd].dcStatusLength;
        rmh->dsp_stat = dsp_commands[cmd].dcStatusType;
        rmh->cmd_idx = cmd;
        memset(&rmh->cmd[1], 0, (REG_CRM_NUMBER - 1) * sizeof(u32));

#ifdef CONFIG_SND_DEBUG
        memset(rmh->stat, 0, REG_CRM_NUMBER * sizeof(u32));
#endif
#ifdef RMH_DEBUG
        rmh->cmd_idx = cmd;
#endif
}

#ifdef RMH_DEBUG
#define LXRMH "lx6464es rmh: "
static void lx_message_dump(struct lx_rmh *rmh)
{
        u8 idx = rmh->cmd_idx;
        int i;

        snd_printk(LXRMH "command %s\n", dsp_commands[idx].dcOpName);

        for (i = 0; i != rmh->cmd_len; ++i)
                snd_printk(LXRMH "\tcmd[%d] %08x\n", i, rmh->cmd[i]);

        for (i = 0; i != rmh->stat_len; ++i)
                snd_printk(LXRMH "\tstat[%d]: %08x\n", i, rmh->stat[i]);
        snd_printk("\n");
}
#else
static inline void lx_message_dump(struct lx_rmh *rmh)
{}
#endif

#define XILINX_TIMEOUT_MS 40
#define XILINX_POLL_NO_SLEEP 100
#define XILINX_POLL_ITERATIONS 150

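/*
 * Send one mailbox command to the DSP and busy-wait for its answer: the
 * command words go to eReg_CRM1.., Reg_CSM_MC announces them, then we poll
 * eReg_CSM for Reg_CSM_MR (up to XILINX_TIMEOUT_MS), read any status words
 * back from eReg_CRM2.. and finally clear eReg_CSM to release the mailbox.
 */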
static int lx_message_send_atomic(struct lx6464es *chip, struct lx_rmh *rmh)
{
        u32 reg = ED_DSP_TIMED_OUT;
        int dwloop;

        if (lx_dsp_reg_read(chip, eReg_CSM) & (Reg_CSM_MC | Reg_CSM_MR)) {
                dev_err(chip->card->dev, "PIOSendMessage eReg_CSM %x\n", reg);
                return -EBUSY;
        }

        lx_dsp_reg_writebuf(chip, eReg_CRM1, rmh->cmd, rmh->cmd_len);

        lx_dsp_reg_write(chip, eReg_CSM, Reg_CSM_MC);

        for (dwloop = 0; dwloop != XILINX_TIMEOUT_MS * 1000; ++dwloop) {
                if (lx_dsp_reg_read(chip, eReg_CSM) & Reg_CSM_MR) {
                        if (rmh->dsp_stat == 0)
                                reg = lx_dsp_reg_read(chip, eReg_CRM1);
                        else
                                reg = 0;
                        goto polling_successful;
                } else
                        udelay(1);
        }
        dev_warn(chip->card->dev, "TIMEOUT lx_message_send_atomic! "
                 "polling failed\n");

polling_successful:
        if ((reg & ERROR_VALUE) == 0) {
                if (rmh->stat_len) {
                        snd_BUG_ON(rmh->stat_len >= (REG_CRM_NUMBER-1));
                        lx_dsp_reg_readbuf(chip, eReg_CRM2, rmh->stat,
                                           rmh->stat_len);
                }
        } else
                dev_err(chip->card->dev, "rmh error: %08x\n", reg);

        lx_dsp_reg_write(chip, eReg_CSM, 0);

        switch (reg) {
        case ED_DSP_TIMED_OUT:
                dev_warn(chip->card->dev, "lx_message_send: dsp timeout\n");
                return -ETIMEDOUT;

        case ED_DSP_CRASHED:
                dev_warn(chip->card->dev, "lx_message_send: dsp crashed\n");
                return -EAGAIN;
        }

        lx_message_dump(rmh);

        return reg;
}

int lx_dsp_get_version(struct lx6464es *chip, u32 *rdsp_version)
{
        u16 ret;

        mutex_lock(&chip->msg_lock);

        lx_message_init(&chip->rmh, CMD_01_GET_SYS_CFG);
        ret = lx_message_send_atomic(chip, &chip->rmh);

        *rdsp_version = chip->rmh.stat[1];
        mutex_unlock(&chip->msg_lock);
        return ret;
}

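/*
 * The frequency field returned by GET_SYS_CFG is an 8-bit counter value
 * (XES_FREQ_COUNT8_MASK); it is mapped onto the two supported base rates
 * (44.1 kHz / 48 kHz) and scaled by chip->freq_ratio.  A value outside the
 * expected window yields 0.
 */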
int lx_dsp_get_clock_frequency(struct lx6464es *chip, u32 *rfreq)
{
        u16 ret = 0;
        u32 freq_raw = 0;
        u32 freq = 0;
        u32 frequency = 0;

        mutex_lock(&chip->msg_lock);

        lx_message_init(&chip->rmh, CMD_01_GET_SYS_CFG);
        ret = lx_message_send_atomic(chip, &chip->rmh);

        if (ret == 0) {
                freq_raw = chip->rmh.stat[0] >> FREQ_FIELD_OFFSET;
                freq = freq_raw & XES_FREQ_COUNT8_MASK;

                if ((freq < XES_FREQ_COUNT8_48_MAX) ||
                    (freq > XES_FREQ_COUNT8_44_MIN))
                        frequency = 0;
                else if (freq >= XES_FREQ_COUNT8_44_MAX)
                        frequency = 44100;
                else
                        frequency = 48000;
        }

        mutex_unlock(&chip->msg_lock);

        *rfreq = frequency * chip->freq_ratio;

        return ret;
}

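/*
 * The MAC address is read from two 24-bit DSP registers (most and least
 * significant halves) and copied byte by byte into chip->mac_address.
 */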
int lx_dsp_get_mac(struct lx6464es *chip)
{
        u32 macmsb, maclsb;

        macmsb = lx_dsp_reg_read(chip, eReg_ADMACESMSB) & 0x00FFFFFF;
        maclsb = lx_dsp_reg_read(chip, eReg_ADMACESLSB) & 0x00FFFFFF;

        chip->mac_address[5] = ((u8 *)(&maclsb))[0];
        chip->mac_address[4] = ((u8 *)(&maclsb))[1];
        chip->mac_address[3] = ((u8 *)(&maclsb))[2];
        chip->mac_address[2] = ((u8 *)(&macmsb))[0];
        chip->mac_address[1] = ((u8 *)(&macmsb))[1];
        chip->mac_address[0] = ((u8 *)(&macmsb))[2];

        return 0;
}

int lx_dsp_set_granularity(struct lx6464es *chip, u32 gran)
{
        int ret;

        mutex_lock(&chip->msg_lock);

        lx_message_init(&chip->rmh, CMD_02_SET_GRANULARITY);
        chip->rmh.cmd[0] |= gran;

        ret = lx_message_send_atomic(chip, &chip->rmh);
        mutex_unlock(&chip->msg_lock);
        return ret;
}

int lx_dsp_read_async_events(struct lx6464es *chip, u32 *data)
{
        int ret;

        mutex_lock(&chip->msg_lock);

        lx_message_init(&chip->rmh, CMD_04_GET_EVENT);
        chip->rmh.stat_len = 9;

        ret = lx_message_send_atomic(chip, &chip->rmh);

        if (!ret)
                memcpy(data, chip->rmh.stat, chip->rmh.stat_len * sizeof(u32));

        mutex_unlock(&chip->msg_lock);
        return ret;
}

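/* Encode the pipe number and direction into the ID field of command word 0. */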
#define PIPE_INFO_TO_CMD(capture, pipe) \
        ((u32)((u32)(pipe) | ((capture) ? ID_IS_CAPTURE : 0L)) << ID_OFFSET)

int lx_pipe_allocate(struct lx6464es *chip, u32 pipe, int is_capture,
                     int channels)
{
        int err;
        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

        mutex_lock(&chip->msg_lock);
        lx_message_init(&chip->rmh, CMD_06_ALLOCATE_PIPE);

        chip->rmh.cmd[0] |= pipe_cmd;
        chip->rmh.cmd[0] |= channels;

        err = lx_message_send_atomic(chip, &chip->rmh);
        mutex_unlock(&chip->msg_lock);

        if (err != 0)
                dev_err(chip->card->dev, "could not allocate pipe\n");

        return err;
}

int lx_pipe_release(struct lx6464es *chip, u32 pipe, int is_capture)
{
        int err;
        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

        mutex_lock(&chip->msg_lock);
        lx_message_init(&chip->rmh, CMD_07_RELEASE_PIPE);

        chip->rmh.cmd[0] |= pipe_cmd;

        err = lx_message_send_atomic(chip, &chip->rmh);
        mutex_unlock(&chip->msg_lock);

        return err;
}

int lx_buffer_ask(struct lx6464es *chip, u32 pipe, int is_capture,
                  u32 *r_needed, u32 *r_freed, u32 *size_array)
{
        int err;
        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

#ifdef CONFIG_SND_DEBUG
        if (size_array)
                memset(size_array, 0, sizeof(u32)*MAX_STREAM_BUFFER);
#endif

        *r_needed = 0;
        *r_freed = 0;

        mutex_lock(&chip->msg_lock);
        lx_message_init(&chip->rmh, CMD_08_ASK_BUFFERS);

        chip->rmh.cmd[0] |= pipe_cmd;

        err = lx_message_send_atomic(chip, &chip->rmh);

        if (!err) {
                int i;
                for (i = 0; i < MAX_STREAM_BUFFER; ++i) {
                        u32 stat = chip->rmh.stat[i];
                        if (stat & (BF_EOB << BUFF_FLAGS_OFFSET)) {
                                *r_freed += 1;
                                if (size_array)
                                        size_array[i] = stat & MASK_DATA_SIZE;
                        } else if ((stat & (BF_VALID << BUFF_FLAGS_OFFSET))
                                   == 0)
                                *r_needed += 1;
                }

                dev_dbg(chip->card->dev,
                        "CMD_08_ASK_BUFFERS: needed %d, freed %d\n",
                        *r_needed, *r_freed);
                for (i = 0; i != chip->rmh.stat_len; ++i)
                        dev_dbg(chip->card->dev,
                                "  stat[%d]: %x, %x\n", i,
                                chip->rmh.stat[i],
                                chip->rmh.stat[i] & MASK_DATA_SIZE);
        }

        mutex_unlock(&chip->msg_lock);
        return err;
}

int lx_pipe_stop(struct lx6464es *chip, u32 pipe, int is_capture)
{
        int err;
        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

        mutex_lock(&chip->msg_lock);
        lx_message_init(&chip->rmh, CMD_09_STOP_PIPE);

        chip->rmh.cmd[0] |= pipe_cmd;

        err = lx_message_send_atomic(chip, &chip->rmh);

        mutex_unlock(&chip->msg_lock);
        return err;
}

static int lx_pipe_toggle_state(struct lx6464es *chip, u32 pipe, int is_capture)
{
        int err;
        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

        mutex_lock(&chip->msg_lock);
        lx_message_init(&chip->rmh, CMD_0B_TOGGLE_PIPE_STATE);

        chip->rmh.cmd[0] |= pipe_cmd;

        err = lx_message_send_atomic(chip, &chip->rmh);

        mutex_unlock(&chip->msg_lock);
        return err;
}

int lx_pipe_start(struct lx6464es *chip, u32 pipe, int is_capture)
{
        int err;

        err = lx_pipe_wait_for_idle(chip, pipe, is_capture);
        if (err < 0)
                return err;

        err = lx_pipe_toggle_state(chip, pipe, is_capture);

        return err;
}

int lx_pipe_pause(struct lx6464es *chip, u32 pipe, int is_capture)
{
        int err = 0;

        err = lx_pipe_wait_for_start(chip, pipe, is_capture);
        if (err < 0)
                return err;

        err = lx_pipe_toggle_state(chip, pipe, is_capture);

        return err;
}

int lx_pipe_sample_count(struct lx6464es *chip, u32 pipe, int is_capture,
                         u64 *rsample_count)
{
        int err;
        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

        mutex_lock(&chip->msg_lock);
        lx_message_init(&chip->rmh, CMD_0A_GET_PIPE_SPL_COUNT);

        chip->rmh.cmd[0] |= pipe_cmd;
        chip->rmh.stat_len = 2;

        err = lx_message_send_atomic(chip, &chip->rmh);

        if (err != 0)
                dev_err(chip->card->dev,
                        "could not query pipe's sample count\n");
        else {
                *rsample_count = ((u64)(chip->rmh.stat[0] & MASK_SPL_COUNT_HI)
                                  << 24)
                                 + chip->rmh.stat[1];
        }

        mutex_unlock(&chip->msg_lock);
        return err;
}

int lx_pipe_state(struct lx6464es *chip, u32 pipe, int is_capture, u16 *rstate)
{
        int err;
        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

        mutex_lock(&chip->msg_lock);
        lx_message_init(&chip->rmh, CMD_0A_GET_PIPE_SPL_COUNT);

        chip->rmh.cmd[0] |= pipe_cmd;

        err = lx_message_send_atomic(chip, &chip->rmh);

        if (err != 0)
                dev_err(chip->card->dev, "could not query pipe's state\n");
        else
                *rstate = (chip->rmh.stat[0] >> PSTATE_OFFSET) & 0x0F;

        mutex_unlock(&chip->msg_lock);
        return err;
}

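/* Poll the pipe state every millisecond, for at most 50 ms. */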
static int lx_pipe_wait_for_state(struct lx6464es *chip, u32 pipe,
                                  int is_capture, u16 state)
{
        int i;

        for (i = 0; i != 50; ++i) {
                u16 current_state;
                int err = lx_pipe_state(chip, pipe, is_capture, &current_state);

                if (err < 0)
                        return err;

                if (!err && current_state == state)
                        return 0;

                mdelay(1);
        }

        return -ETIMEDOUT;
}

int lx_pipe_wait_for_start(struct lx6464es *chip, u32 pipe, int is_capture)
{
        return lx_pipe_wait_for_state(chip, pipe, is_capture, PSTATE_RUN);
}

int lx_pipe_wait_for_idle(struct lx6464es *chip, u32 pipe, int is_capture)
{
        return lx_pipe_wait_for_state(chip, pipe, is_capture, PSTATE_IDLE);
}

int lx_stream_set_state(struct lx6464es *chip, u32 pipe,
                        int is_capture, enum stream_state_t state)
{
        int err;
        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

        mutex_lock(&chip->msg_lock);
        lx_message_init(&chip->rmh, CMD_13_SET_STREAM_STATE);

        chip->rmh.cmd[0] |= pipe_cmd;
        chip->rmh.cmd[0] |= state;

        err = lx_message_send_atomic(chip, &chip->rmh);
        mutex_unlock(&chip->msg_lock);

        return err;
}

int lx_stream_set_format(struct lx6464es *chip, struct snd_pcm_runtime *runtime,
                         u32 pipe, int is_capture)
{
        int err;
        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);
        u32 channels = runtime->channels;

        if (runtime->channels != channels)
                dev_err(chip->card->dev, "channel count mismatch: %d vs %d",
                        runtime->channels, channels);

        mutex_lock(&chip->msg_lock);
        lx_message_init(&chip->rmh, CMD_0C_DEF_STREAM);

        chip->rmh.cmd[0] |= pipe_cmd;

        if (runtime->sample_bits == 16)
                chip->rmh.cmd[0] |= (STREAM_FMT_16b << STREAM_FMT_OFFSET);

        if (snd_pcm_format_little_endian(runtime->format))
                chip->rmh.cmd[0] |= (STREAM_FMT_intel << STREAM_FMT_OFFSET);

        chip->rmh.cmd[0] |= channels-1;

        err = lx_message_send_atomic(chip, &chip->rmh);
        mutex_unlock(&chip->msg_lock);

        return err;
}

int lx_stream_state(struct lx6464es *chip, u32 pipe, int is_capture,
                    int *rstate)
{
        int err;
        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

        mutex_lock(&chip->msg_lock);
        lx_message_init(&chip->rmh, CMD_0E_GET_STREAM_SPL_COUNT);

        chip->rmh.cmd[0] |= pipe_cmd;

        err = lx_message_send_atomic(chip, &chip->rmh);

        *rstate = (chip->rmh.stat[0] & SF_START) ? START_STATE : PAUSE_STATE;

        mutex_unlock(&chip->msg_lock);
        return err;
}

int lx_stream_sample_position(struct lx6464es *chip, u32 pipe, int is_capture,
                              u64 *r_bytepos)
{
        int err;
        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

        mutex_lock(&chip->msg_lock);
        lx_message_init(&chip->rmh, CMD_0E_GET_STREAM_SPL_COUNT);

        chip->rmh.cmd[0] |= pipe_cmd;

        err = lx_message_send_atomic(chip, &chip->rmh);

        *r_bytepos = ((u64) (chip->rmh.stat[0] & MASK_SPL_COUNT_HI)
                      << 32)
                     + chip->rmh.stat[1];

        mutex_unlock(&chip->msg_lock);
        return err;
}

int lx_buffer_give(struct lx6464es *chip, u32 pipe, int is_capture,
                   u32 buffer_size, u32 buf_address_lo, u32 buf_address_hi,
                   u32 *r_buffer_index)
{
        int err;
        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

        mutex_lock(&chip->msg_lock);
        lx_message_init(&chip->rmh, CMD_0F_UPDATE_BUFFER);

        chip->rmh.cmd[0] |= pipe_cmd;
        chip->rmh.cmd[0] |= BF_NOTIFY_EOB;

        chip->rmh.cmd[1] = buffer_size & MASK_DATA_SIZE;
        chip->rmh.cmd[2] = buf_address_lo;

        if (buf_address_hi) {
                chip->rmh.cmd_len = 4;
                chip->rmh.cmd[3] = buf_address_hi;
                chip->rmh.cmd[0] |= BF_64BITS_ADR;
        }

        err = lx_message_send_atomic(chip, &chip->rmh);

        if (err == 0) {
                *r_buffer_index = chip->rmh.stat[0];
                goto done;
        }

        if (err == EB_RBUFFERS_TABLE_OVERFLOW)
                dev_err(chip->card->dev,
                        "lx_buffer_give EB_RBUFFERS_TABLE_OVERFLOW\n");

        if (err == EB_INVALID_STREAM)
                dev_err(chip->card->dev,
                        "lx_buffer_give EB_INVALID_STREAM\n");

        if (err == EB_CMD_REFUSED)
                dev_err(chip->card->dev,
                        "lx_buffer_give EB_CMD_REFUSED\n");

done:
        mutex_unlock(&chip->msg_lock);
        return err;
}

int lx_buffer_free(struct lx6464es *chip, u32 pipe, int is_capture,
                   u32 *r_buffer_size)
{
        int err;
        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

        mutex_lock(&chip->msg_lock);
        lx_message_init(&chip->rmh, CMD_11_CANCEL_BUFFER);

        chip->rmh.cmd[0] |= pipe_cmd;
        chip->rmh.cmd[0] |= MASK_BUFFER_ID;

        err = lx_message_send_atomic(chip, &chip->rmh);

        if (err == 0)
                *r_buffer_size = chip->rmh.stat[0] & MASK_DATA_SIZE;

        mutex_unlock(&chip->msg_lock);
        return err;
}

int lx_buffer_cancel(struct lx6464es *chip, u32 pipe, int is_capture,
                     u32 buffer_index)
{
        int err;
        u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe);

        mutex_lock(&chip->msg_lock);
        lx_message_init(&chip->rmh, CMD_11_CANCEL_BUFFER);

        chip->rmh.cmd[0] |= pipe_cmd;
        chip->rmh.cmd[0] |= buffer_index;

        err = lx_message_send_atomic(chip, &chip->rmh);

        mutex_unlock(&chip->msg_lock);
        return err;
}

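/*
 * Mute control: CMD_0D_SET_MUTE takes a 64-bit channel mask in cmd[1]/cmd[2]
 * (high word first); all ones mutes every channel of the pipe, zero unmutes.
 */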
int lx_level_unmute(struct lx6464es *chip, int is_capture, int unmute)
{
        int err;
        u64 mute_mask = unmute ? 0 : 0xFFFFFFFFFFFFFFFFLLU;

        mutex_lock(&chip->msg_lock);
        lx_message_init(&chip->rmh, CMD_0D_SET_MUTE);

        chip->rmh.cmd[0] |= PIPE_INFO_TO_CMD(is_capture, 0);

        chip->rmh.cmd[1] = (u32)(mute_mask >> (u64)32);
        chip->rmh.cmd[2] = (u32)(mute_mask & (u64)0xFFFFFFFF);

        dev_dbg(chip->card->dev,
                "mute %x %x %x\n", chip->rmh.cmd[0], chip->rmh.cmd[1],
                chip->rmh.cmd[2]);

        err = lx_message_send_atomic(chip, &chip->rmh);

        mutex_unlock(&chip->msg_lock);
        return err;
}

static u32 peak_map[] = {
        0x00000109,
        0x0000083B,
        0x000020C4,
        0x00008273,
        0x00020756,
        0x00040C37,
        0x00081385,
        0x00101D3F,
        0x0016C310,
        0x002026F2,
        0x002D6A86,
        0x004026E6,
        0x005A9DF6,
        0x0065AC8B,
        0x00721481,
        0x007FFFFF,
};

int lx_level_peaks(struct lx6464es *chip, int is_capture, int channels,
                   u32 *r_levels)
{
        int err = 0;
        int i;

        mutex_lock(&chip->msg_lock);
        for (i = 0; i < channels; i += 4) {
                u32 s0, s1, s2, s3;

                lx_message_init(&chip->rmh, CMD_12_GET_PEAK);
                chip->rmh.cmd[0] |= PIPE_INFO_TO_CMD(is_capture, i);

                err = lx_message_send_atomic(chip, &chip->rmh);

                if (err == 0) {
                        s0 = peak_map[chip->rmh.stat[0] & 0x0F];
                        s1 = peak_map[(chip->rmh.stat[0] >> 4) & 0xf];
                        s2 = peak_map[(chip->rmh.stat[0] >> 8) & 0xf];
                        s3 = peak_map[(chip->rmh.stat[0] >> 12) & 0xf];
                } else
                        s0 = s1 = s2 = s3 = 0;

                r_levels[0] = s0;
                r_levels[1] = s1;
                r_levels[2] = s2;
                r_levels[3] = s3;

                r_levels += 4;
        }

        mutex_unlock(&chip->msg_lock);
        return err;
}

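/* PLX doorbell/interrupt control bits used in the ePLX_IRQCS register. */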
#define PCX_IRQ_NONE 0
#define IRQCS_ACTIVE_PCIDB BIT(13)
#define IRQCS_ENABLE_PCIIRQ BIT(8)
#define IRQCS_ENABLE_PCIDB BIT(9)

static u32 lx_interrupt_test_ack(struct lx6464es *chip)
{
        u32 irqcs = lx_plx_reg_read(chip, ePLX_IRQCS);

        if (irqcs & IRQCS_ACTIVE_PCIDB) {
                u32 temp;
                irqcs = PCX_IRQ_NONE;

                while ((temp = lx_plx_reg_read(chip, ePLX_L2PCIDB))) {
                        irqcs |= temp;
                        lx_plx_reg_write(chip, ePLX_L2PCIDB, temp);
                }

                return irqcs;
        }
        return PCX_IRQ_NONE;
}

static int lx_interrupt_ack(struct lx6464es *chip, u32 *r_irqsrc,
                            int *r_async_pending, int *r_async_escmd)
{
        u32 irq_async;
        u32 irqsrc = lx_interrupt_test_ack(chip);

        if (irqsrc == PCX_IRQ_NONE)
                return 0;

        *r_irqsrc = irqsrc;

        irq_async = irqsrc & MASK_SYS_ASYNC_EVENTS;

        if (irq_async & MASK_SYS_STATUS_ESA) {
                irq_async &= ~MASK_SYS_STATUS_ESA;
                *r_async_escmd = 1;
        }

        if (irq_async) {
                *r_async_pending = 1;
        }

        return 1;
}

static int lx_interrupt_handle_async_events(struct lx6464es *chip, u32 irqsrc,
                                            int *r_freq_changed,
                                            u64 *r_notified_in_pipe_mask,
                                            u64 *r_notified_out_pipe_mask)
{
        int err;
        u32 stat[9];

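        /*
         * GET_EVENT returns 9 status words (see lx_dsp_read_async_events());
         * stat[1]/stat[2] carry the 64-bit mask of playback (OUT) pipes with
         * an end-of-buffer notification, stat[3]/stat[4] the capture (IN)
         * pipes.
         */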
        int eb_pending_out = (irqsrc & MASK_SYS_STATUS_EOBO) ? 1 : 0;
        int eb_pending_in = (irqsrc & MASK_SYS_STATUS_EOBI) ? 1 : 0;

        *r_freq_changed = (irqsrc & MASK_SYS_STATUS_FREQ) ? 1 : 0;

        err = lx_dsp_read_async_events(chip, stat);
        if (err < 0)
                return err;

        if (eb_pending_in) {
                *r_notified_in_pipe_mask = ((u64)stat[3] << 32)
                                           + stat[4];
                dev_dbg(chip->card->dev, "interrupt: EOBI pending %llx\n",
                        *r_notified_in_pipe_mask);
        }
        if (eb_pending_out) {
                *r_notified_out_pipe_mask = ((u64)stat[1] << 32)
                                            + stat[2];
                dev_dbg(chip->card->dev, "interrupt: EOBO pending %llx\n",
                        *r_notified_out_pipe_mask);
        }

        return err;
}

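/*
 * Called from the threaded interrupt handler when a pipe signalled an
 * end-of-buffer event: query the DSP buffer status, then hand it the DMA
 * address of the next period of the substream.
 */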
static int lx_interrupt_request_new_buffer(struct lx6464es *chip,
                                           struct lx_stream *lx_stream)
{
        struct snd_pcm_substream *substream = lx_stream->stream;
        const unsigned int is_capture = lx_stream->is_capture;
        int err;

        const u32 channels = substream->runtime->channels;
        const u32 bytes_per_frame = channels * 3;
        const u32 period_size = substream->runtime->period_size;
        const u32 period_bytes = period_size * bytes_per_frame;
        const u32 pos = lx_stream->frame_pos;
        const u32 next_pos = ((pos+1) == substream->runtime->periods) ?
                0 : pos + 1;

        dma_addr_t buf = substream->dma_buffer.addr + pos * period_bytes;
        u32 buf_hi = 0;
        u32 buf_lo = 0;
        u32 buffer_index = 0;

        u32 needed, freed;
        u32 size_array[MAX_STREAM_BUFFER];

        dev_dbg(chip->card->dev, "->lx_interrupt_request_new_buffer\n");

        mutex_lock(&chip->lock);

        err = lx_buffer_ask(chip, 0, is_capture, &needed, &freed, size_array);
        dev_dbg(chip->card->dev,
                "interrupt: needed %d, freed %d\n", needed, freed);

        unpack_pointer(buf, &buf_lo, &buf_hi);
        err = lx_buffer_give(chip, 0, is_capture, period_bytes, buf_lo, buf_hi,
                             &buffer_index);
        dev_dbg(chip->card->dev,
                "interrupt: gave buffer index %x on 0x%lx (%d bytes)\n",
                buffer_index, (unsigned long)buf, period_bytes);

        lx_stream->frame_pos = next_pos;
        mutex_unlock(&chip->lock);

        return err;
}

irqreturn_t lx_interrupt(int irq, void *dev_id)
{
        struct lx6464es *chip = dev_id;
        int async_pending, async_escmd;
        u32 irqsrc;
        bool wake_thread = false;

        dev_dbg(chip->card->dev,
                "**************************************************\n");

        if (!lx_interrupt_ack(chip, &irqsrc, &async_pending, &async_escmd)) {
                dev_dbg(chip->card->dev, "IRQ_NONE\n");
                return IRQ_NONE;
        }

        if (irqsrc & MASK_SYS_STATUS_CMD_DONE)
                return IRQ_HANDLED;

        if (irqsrc & MASK_SYS_STATUS_EOBI)
                dev_dbg(chip->card->dev, "interrupt: EOBI\n");

        if (irqsrc & MASK_SYS_STATUS_EOBO)
                dev_dbg(chip->card->dev, "interrupt: EOBO\n");

        if (irqsrc & MASK_SYS_STATUS_URUN)
                dev_dbg(chip->card->dev, "interrupt: URUN\n");

        if (irqsrc & MASK_SYS_STATUS_ORUN)
                dev_dbg(chip->card->dev, "interrupt: ORUN\n");

        if (async_pending) {
                wake_thread = true;
                chip->irqsrc = irqsrc;
        }

        if (async_escmd) {
                dev_dbg(chip->card->dev, "interrupt requests escmd handling\n");
        }

        return wake_thread ? IRQ_WAKE_THREAD : IRQ_HANDLED;
}

irqreturn_t lx_threaded_irq(int irq, void *dev_id)
{
        struct lx6464es *chip = dev_id;
        u64 notified_in_pipe_mask = 0;
        u64 notified_out_pipe_mask = 0;
        int freq_changed;
        int err;

        err = lx_interrupt_handle_async_events(chip, chip->irqsrc,
                                               &freq_changed,
                                               &notified_in_pipe_mask,
                                               &notified_out_pipe_mask);
        if (err)
                dev_err(chip->card->dev, "error handling async events\n");

        if (notified_in_pipe_mask) {
                struct lx_stream *lx_stream = &chip->capture_stream;

                dev_dbg(chip->card->dev,
                        "requesting audio transfer for capture\n");
                err = lx_interrupt_request_new_buffer(chip, lx_stream);
                if (err < 0)
                        dev_err(chip->card->dev,
                                "cannot request new buffer for capture\n");
                snd_pcm_period_elapsed(lx_stream->stream);
        }

        if (notified_out_pipe_mask) {
                struct lx_stream *lx_stream = &chip->playback_stream;

                dev_dbg(chip->card->dev,
                        "requesting audio transfer for playback\n");
                err = lx_interrupt_request_new_buffer(chip, lx_stream);
                if (err < 0)
                        dev_err(chip->card->dev,
                                "cannot request new buffer for playback\n");
                snd_pcm_period_elapsed(lx_stream->stream);
        }

        return IRQ_HANDLED;
}

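/* Enable or disable both the PCI interrupt and the PCI doorbell interrupt. */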
static void lx_irq_set(struct lx6464es *chip, int enable)
{
        u32 reg = lx_plx_reg_read(chip, ePLX_IRQCS);

        if (enable)
                reg |= (IRQCS_ENABLE_PCIIRQ | IRQCS_ENABLE_PCIDB);
        else
                reg &= ~(IRQCS_ENABLE_PCIIRQ | IRQCS_ENABLE_PCIDB);
        lx_plx_reg_write(chip, ePLX_IRQCS, reg);
}

void lx_irq_enable(struct lx6464es *chip)
{
        dev_dbg(chip->card->dev, "->lx_irq_enable\n");
        lx_irq_set(chip, 1);
}

void lx_irq_disable(struct lx6464es *chip)
{
        dev_dbg(chip->card->dev, "->lx_irq_disable\n");
        lx_irq_set(chip, 0);
}