This source file includes the following definitions:
- print_cb
- print_cb_list
- prep_dma_xor_dbg
- prep_dma_pq_dbg
- prep_dma_pqzero_sum_dbg
- ppc440spe_desc_init_interrupt
- ppc440spe_desc_init_null_xor
- ppc440spe_desc_init_xor
- ppc440spe_desc_init_dma2pq
- ppc440spe_desc_init_dma01pq
- ppc440spe_desc_init_dma01pqzero_sum
- ppc440spe_desc_init_memcpy
- ppc440spe_desc_set_src_addr
- ppc440spe_desc_set_src_mult
- ppc440spe_desc_set_dest_addr
- ppc440spe_desc_set_byte_count
- ppc440spe_desc_set_rxor_block_size
- ppc440spe_desc_set_dcheck
- ppc440spe_xor_set_link
- ppc440spe_desc_set_link
- ppc440spe_desc_get_link
- ppc440spe_desc_is_aligned
- ppc440spe_chan_xor_slot_count
- ppc440spe_dma2_pq_slot_count
- ppc440spe_adma_device_clear_eot_status
- ppc440spe_chan_is_busy
- ppc440spe_chan_set_first_xor_descriptor
- ppc440spe_dma_put_desc
- ppc440spe_chan_append
- ppc440spe_chan_get_current_descriptor
- ppc440spe_chan_run
- ppc440spe_can_rxor
- ppc440spe_adma_estimate
- ppc440spe_async_tx_find_best_channel
- ppc440spe_get_group_entry
- ppc440spe_adma_free_slots
- ppc440spe_adma_run_tx_complete_actions
- ppc440spe_adma_clean_slot
- __ppc440spe_adma_slot_cleanup
- ppc440spe_adma_tasklet
- ppc440spe_adma_slot_cleanup
- ppc440spe_adma_alloc_slots
- ppc440spe_adma_alloc_chan_resources
- ppc440spe_rxor_set_region
- ppc440spe_rxor_set_src
- ppc440spe_rxor_set_mult
- ppc440spe_adma_check_threshold
- ppc440spe_adma_tx_submit
- ppc440spe_adma_prep_dma_interrupt
- ppc440spe_adma_prep_dma_memcpy
- ppc440spe_adma_prep_dma_xor
- ppc440spe_adma_init_dma2rxor_slot
- ppc440spe_dma01_prep_mult
- ppc440spe_dma01_prep_sum_product
- ppc440spe_dma01_prep_pq
- ppc440spe_dma2_prep_pq
- ppc440spe_adma_prep_dma_pq
- ppc440spe_adma_prep_dma_pqzero_sum
- ppc440spe_adma_prep_dma_xor_zero_sum
- ppc440spe_adma_set_dest
- ppc440spe_adma_pq_zero_op
- ppc440spe_adma_pq_set_dest
- ppc440spe_adma_pqzero_sum_set_dest
- ppc440spe_desc_set_xor_src_cnt
- ppc440spe_adma_pq_set_src
- ppc440spe_adma_memcpy_xor_set_src
- ppc440spe_adma_dma2rxor_inc_addr
- ppc440spe_adma_dma2rxor_prep_src
- ppc440spe_adma_dma2rxor_set_src
- ppc440spe_adma_dma2rxor_set_mult
- ppc440spe_init_rxor_cursor
- ppc440spe_adma_pq_set_src_mult
- ppc440spe_adma_free_chan_resources
- ppc440spe_adma_tx_status
- ppc440spe_adma_eot_handler
- ppc440spe_adma_err_handler
- ppc440spe_test_callback
- ppc440spe_adma_issue_pending
- ppc440spe_chan_start_null_xor
- ppc440spe_test_raid6
- ppc440spe_adma_init_capabilities
- ppc440spe_adma_setup_irqs
- ppc440spe_adma_release_irqs
- ppc440spe_adma_probe
- ppc440spe_adma_remove
- devices_show
- enable_show
- enable_store
- poly_show
- poly_store
- ppc440spe_configure_raid_devices
- ppc440spe_adma_init
- ppc440spe_adma_exit
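/*
 * ADMA (Asynchronous DMA) driver for the PPC440SPE DMA0/DMA1 and XOR
 * engines, providing memcpy, XOR and RAID-6 PQ operations on top of the
 * dmaengine/async_tx framework (see adma.h and ../dmaengine.h).
 */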
#include <linux/init.h>
#include <linux/module.h>
#include <linux/async_tx.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/proc_fs.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <asm/dcr.h>
#include <asm/dcr-regs.h>
#include "adma.h"
#include "../dmaengine.h"

enum ppc_adma_init_code {
	PPC_ADMA_INIT_OK = 0,
	PPC_ADMA_INIT_MEMRES,
	PPC_ADMA_INIT_MEMREG,
	PPC_ADMA_INIT_ALLOC,
	PPC_ADMA_INIT_COHERENT,
	PPC_ADMA_INIT_CHANNEL,
	PPC_ADMA_INIT_IRQ1,
	PPC_ADMA_INIT_IRQ2,
	PPC_ADMA_INIT_REGISTER
};

static char *ppc_adma_errors[] = {
	[PPC_ADMA_INIT_OK] = "ok",
	[PPC_ADMA_INIT_MEMRES] = "failed to get memory resource",
	[PPC_ADMA_INIT_MEMREG] = "failed to request memory region",
	[PPC_ADMA_INIT_ALLOC] = "failed to allocate memory for adev "
				"structure",
	[PPC_ADMA_INIT_COHERENT] = "failed to allocate coherent memory for "
				   "hardware descriptors",
	[PPC_ADMA_INIT_CHANNEL] = "failed to allocate memory for channel",
	[PPC_ADMA_INIT_IRQ1] = "failed to request first irq",
	[PPC_ADMA_INIT_IRQ2] = "failed to request second irq",
	[PPC_ADMA_INIT_REGISTER] = "failed to register dma async device",
};

static enum ppc_adma_init_code
ppc440spe_adma_devices[PPC440SPE_ADMA_ENGINES_NUM];

struct ppc_dma_chan_ref {
	struct dma_chan *chan;
	struct list_head node;
};

/* The list of channels exported by the ppc440spe ADMA driver */
struct list_head
ppc440spe_adma_chan_list = LIST_HEAD_INIT(ppc440spe_adma_chan_list);

/* Set when the XOR chain must be refetched from the EOT interrupt handler */
static u32 do_xor_refetch;

/* Pointer to the DMA0/DMA1 CP/CS FIFO buffer */
static void *ppc440spe_dma_fifo_buf;

/* Last submitted and first queued CDBs of the DMA0/DMA1 engines */
static struct ppc440spe_adma_desc_slot *chan_last_sub[3];
static struct ppc440spe_adma_desc_slot *chan_first_cdb[3];

/* Last linked and last submitted-to-hardware XOR CBs */
static struct ppc440spe_adma_desc_slot *xor_last_linked;
static struct ppc440spe_adma_desc_slot *xor_last_submit;

/* Pattern used by the DCHECK128 data-check operations */
static char ppc440spe_qword[16];

static atomic_t ppc440spe_adma_err_irq_ref;
static dcr_host_t ppc440spe_mq_dcr_host;
static unsigned int ppc440spe_mq_dcr_len;

/*
 * The RXOR block size is a global (DCR) setting, so only one RXOR
 * operation may be in flight at a time; this state tracks it
 * (PPC440SPE_RXOR_RUN bit).
 */
static unsigned long ppc440spe_rxor_state;

/* RAID-6 self-test state */
static u32 ppc440spe_r6_enabled;
static struct ppc440spe_adma_chan *ppc440spe_r6_tchan;
static struct completion ppc440spe_r6_test_comp;

static int ppc440spe_adma_dma2rxor_prep_src(
		struct ppc440spe_adma_desc_slot *desc,
		struct ppc440spe_rxor *cursor, int index,
		int src_cnt, u32 addr);
static void ppc440spe_adma_dma2rxor_set_src(
		struct ppc440spe_adma_desc_slot *desc,
		int index, dma_addr_t addr);
static void ppc440spe_adma_dma2rxor_set_mult(
		struct ppc440spe_adma_desc_slot *desc,
		int index, u8 mult);

#ifdef ADMA_LL_DEBUG
#define ADMA_LL_DBG(x) ({ if (1) x; 0; })
#else
#define ADMA_LL_DBG(x) ({ if (0) x; 0; })
#endif

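/*
 * print_cb - dump a hardware descriptor for debugging: a DMA CDB for the
 * DMA0/DMA1 engines (device ids 0 and 1), or an XOR CB for the XOR engine
 * (device id 2).
 */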
static void print_cb(struct ppc440spe_adma_chan *chan, void *block)
{
	struct dma_cdb *cdb;
	struct xor_cb *cb;
	int i;

	switch (chan->device->id) {
	case 0:
	case 1:
		cdb = block;

		pr_debug("CDB at %p [%d]:\n"
			"\t attr 0x%02x opc 0x%02x cnt 0x%08x\n"
			"\t sg1u 0x%08x sg1l 0x%08x\n"
			"\t sg2u 0x%08x sg2l 0x%08x\n"
			"\t sg3u 0x%08x sg3l 0x%08x\n",
			cdb, chan->device->id,
			cdb->attr, cdb->opc, le32_to_cpu(cdb->cnt),
			le32_to_cpu(cdb->sg1u), le32_to_cpu(cdb->sg1l),
			le32_to_cpu(cdb->sg2u), le32_to_cpu(cdb->sg2l),
			le32_to_cpu(cdb->sg3u), le32_to_cpu(cdb->sg3l)
		);
		break;
	case 2:
		cb = block;

		pr_debug("CB at %p [%d]:\n"
			"\t cbc 0x%08x cbbc 0x%08x cbs 0x%08x\n"
			"\t cbtah 0x%08x cbtal 0x%08x\n"
			"\t cblah 0x%08x cblal 0x%08x\n",
			cb, chan->device->id,
			cb->cbc, cb->cbbc, cb->cbs,
			cb->cbtah, cb->cbtal,
			cb->cblah, cb->cblal);
		for (i = 0; i < 16; i++) {
			if (i && !cb->ops[i].h && !cb->ops[i].l)
				continue;
			pr_debug("\t ops[%2d]: h 0x%08x l 0x%08x\n",
				i, cb->ops[i].h, cb->ops[i].l);
		}
		break;
	}
}

static void print_cb_list(struct ppc440spe_adma_chan *chan,
			  struct ppc440spe_adma_desc_slot *iter)
{
	for (; iter; iter = iter->hw_next)
		print_cb(chan, iter->hw_desc);
}

static void prep_dma_xor_dbg(int id, dma_addr_t dst, dma_addr_t *src,
			     unsigned int src_cnt)
{
	int i;

	pr_debug("\n%s(%d):\nsrc: ", __func__, id);
	for (i = 0; i < src_cnt; i++)
		pr_debug("\t0x%016llx ", src[i]);
	pr_debug("dst:\n\t0x%016llx\n", dst);
}

static void prep_dma_pq_dbg(int id, dma_addr_t *dst, dma_addr_t *src,
			    unsigned int src_cnt)
{
	int i;

	pr_debug("\n%s(%d):\nsrc: ", __func__, id);
	for (i = 0; i < src_cnt; i++)
		pr_debug("\t0x%016llx ", src[i]);
	pr_debug("dst: ");
	for (i = 0; i < 2; i++)
		pr_debug("\t0x%016llx ", dst[i]);
}

static void prep_dma_pqzero_sum_dbg(int id, dma_addr_t *src,
				    unsigned int src_cnt,
				    const unsigned char *scf)
{
	int i;

	pr_debug("\n%s(%d):\nsrc(coef): ", __func__, id);
	if (scf) {
		for (i = 0; i < src_cnt; i++)
			pr_debug("\t0x%016llx(0x%02x) ", src[i], scf[i]);
	} else {
		for (i = 0; i < src_cnt; i++)
			pr_debug("\t0x%016llx(no) ", src[i]);
	}

	pr_debug("dst: ");
	for (i = 0; i < 2; i++)
		pr_debug("\t0x%016llx ", src[src_cnt + i]);
}

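/******************************************************************************
 * Command (Descriptor) Blocks low-level routines
 ******************************************************************************/

/**
 * ppc440spe_desc_init_interrupt - initialize the descriptor for an
 * INTERRUPT pseudo operation: a NULL transfer that only raises an
 * end-of-transfer interrupt.
 */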
static void ppc440spe_desc_init_interrupt(struct ppc440spe_adma_desc_slot *desc,
					  struct ppc440spe_adma_chan *chan)
{
	struct xor_cb *p;

	switch (chan->device->id) {
	case PPC440SPE_XOR_ID:
		p = desc->hw_desc;
		memset(desc->hw_desc, 0, sizeof(struct xor_cb));
		/* NOP with interrupt */
		p->cbc = XOR_CBCR_CBCE_BIT;
		break;
	case PPC440SPE_DMA0_ID:
	case PPC440SPE_DMA1_ID:
		memset(desc->hw_desc, 0, sizeof(struct dma_cdb));
		/* NOP with interrupt */
		set_bit(PPC440SPE_DESC_INT, &desc->flags);
		break;
	default:
		printk(KERN_ERR "Unsupported id %d in %s\n", chan->device->id,
		       __func__);
		break;
	}
}

/**
 * ppc440spe_desc_init_null_xor - initialize the descriptor for a NULL XOR
 * operation (used to start up the XOR engine chain).
 */
static void ppc440spe_desc_init_null_xor(struct ppc440spe_adma_desc_slot *desc)
{
	memset(desc->hw_desc, 0, sizeof(struct xor_cb));
	desc->hw_next = NULL;
	desc->src_cnt = 0;
	desc->dst_cnt = 1;
}

/**
 * ppc440spe_desc_init_xor - initialize the descriptor for an XOR operation
 */
static void ppc440spe_desc_init_xor(struct ppc440spe_adma_desc_slot *desc,
				    int src_cnt, unsigned long flags)
{
	struct xor_cb *hw_desc = desc->hw_desc;

	memset(desc->hw_desc, 0, sizeof(struct xor_cb));
	desc->hw_next = NULL;
	desc->src_cnt = src_cnt;
	desc->dst_cnt = 1;

	hw_desc->cbc = XOR_CBCR_TGT_BIT | src_cnt;
	if (flags & DMA_PREP_INTERRUPT)
		/* Enable interrupt on completion */
		hw_desc->cbc |= XOR_CBCR_CBCE_BIT;
}

/**
 * ppc440spe_desc_init_dma2pq - initialize the descriptor for a PQ
 * operation done on the DMA2 (XOR) engine.
 */
static void ppc440spe_desc_init_dma2pq(struct ppc440spe_adma_desc_slot *desc,
				       int dst_cnt, int src_cnt,
				       unsigned long flags)
{
	struct xor_cb *hw_desc = desc->hw_desc;

	memset(desc->hw_desc, 0, sizeof(struct xor_cb));
	desc->hw_next = NULL;
	desc->src_cnt = src_cnt;
	desc->dst_cnt = dst_cnt;
	memset(desc->reverse_flags, 0, sizeof(desc->reverse_flags));
	desc->descs_per_op = 0;

	hw_desc->cbc = XOR_CBCR_TGT_BIT;
	if (flags & DMA_PREP_INTERRUPT)
		/* Enable interrupt on completion */
		hw_desc->cbc |= XOR_CBCR_CBCE_BIT;
}

#define DMA_CTRL_FLAGS_LAST	DMA_PREP_FENCE
#define DMA_PREP_ZERO_P		(DMA_CTRL_FLAGS_LAST << 1)
#define DMA_PREP_ZERO_Q		(DMA_PREP_ZERO_P << 1)

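/**
 * ppc440spe_desc_init_dma01pq - initialize the group of descriptors for a
 * PQ operation on DMA0/DMA1. Depending on the op flags the chain is built
 * either as a plain WXOR sequence (optionally preceded by CDBs that zero
 * P and/or Q first) or as RXOR descriptor(s) followed by WXOR descriptors.
 * Only the last CDB in the chain may raise an interrupt.
 */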
static void ppc440spe_desc_init_dma01pq(struct ppc440spe_adma_desc_slot *desc,
					int dst_cnt, int src_cnt,
					unsigned long flags, unsigned long op)
{
	struct dma_cdb *hw_desc;
	struct ppc440spe_adma_desc_slot *iter;
	u8 dopc;

	/* Common initialization of the PQ descriptors chain */
	set_bits(op, &desc->flags);
	desc->src_cnt = src_cnt;
	desc->dst_cnt = dst_cnt;

	/*
	 * WXOR MULTICAST if both P and Q are being computed,
	 * MV_SG1_SG2 if only one of them is
	 */
	dopc = (desc->dst_cnt == DMA_DEST_MAX_NUM) ?
		DMA_CDB_OPC_MULTICAST : DMA_CDB_OPC_MV_SG1_SG2;

	list_for_each_entry(iter, &desc->group_list, chain_node) {
		hw_desc = iter->hw_desc;
		memset(iter->hw_desc, 0, sizeof(struct dma_cdb));

		if (likely(!list_is_last(&iter->chain_node,
				&desc->group_list))) {
			/* set 'next' pointer */
			iter->hw_next = list_entry(iter->chain_node.next,
				struct ppc440spe_adma_desc_slot, chain_node);
			clear_bit(PPC440SPE_DESC_INT, &iter->flags);
		} else {
			/*
			 * this is the last descriptor in the chain: request
			 * an interrupt here only if it was asked for
			 */
			iter->hw_next = NULL;
			if (flags & DMA_PREP_INTERRUPT)
				set_bit(PPC440SPE_DESC_INT, &iter->flags);
			else
				clear_bit(PPC440SPE_DESC_INT, &iter->flags);
		}
	}

	/* Set OPCODES depending on the operation mode */
	if (!test_bit(PPC440SPE_DESC_RXOR, &desc->flags)) {
		/*
		 * This is a WXOR-only chain:
		 * - the first descriptor(s) may zero P and/or Q,
		 * - the rest are WXOR accumulators
		 */
		iter = list_first_entry(&desc->group_list,
					struct ppc440spe_adma_desc_slot,
					chain_node);

		if (test_bit(PPC440SPE_ZERO_P, &desc->flags)) {
			hw_desc = iter->hw_desc;
			hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
			iter = list_first_entry(&iter->chain_node,
					struct ppc440spe_adma_desc_slot,
					chain_node);
		}

		if (test_bit(PPC440SPE_ZERO_Q, &desc->flags)) {
			hw_desc = iter->hw_desc;
			hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
			iter = list_first_entry(&iter->chain_node,
					struct ppc440spe_adma_desc_slot,
					chain_node);
		}

		list_for_each_entry_from(iter, &desc->group_list, chain_node) {
			hw_desc = iter->hw_desc;
			hw_desc->opc = dopc;
		}
	} else {
		/*
		 * This is an RXOR-based chain: the first descriptor (two
		 * when both P and Q are computed) carries the RXOR
		 * operation, the following ones are WXOR accumulators
		 */
		iter = list_first_entry(&desc->group_list,
					struct ppc440spe_adma_desc_slot,
					chain_node);
		hw_desc = iter->hw_desc;
		hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;

		if (desc->dst_cnt == DMA_DEST_MAX_NUM) {
			iter = list_first_entry(&iter->chain_node,
					struct ppc440spe_adma_desc_slot,
					chain_node);
			hw_desc = iter->hw_desc;
			hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
		}

		/* The remaining descriptors are WXOR */
		if (test_bit(PPC440SPE_DESC_WXOR, &desc->flags)) {
			iter = list_first_entry(&iter->chain_node,
					struct ppc440spe_adma_desc_slot,
					chain_node);
			list_for_each_entry_from(iter, &desc->group_list,
					chain_node) {
				hw_desc = iter->hw_desc;
				hw_desc->opc = dopc;
			}
		}
	}
}

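/**
 * ppc440spe_desc_init_dma01pqzero_sum - initialize the group of descriptors
 * for a PQ zero-sum operation on DMA0/DMA1: the leading one or two CDBs of
 * the group are left untouched here, the next src_cnt CDBs accumulate the
 * sources and the trailing CDBs run DCHECK128 compares.
 */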
static void ppc440spe_desc_init_dma01pqzero_sum(
				struct ppc440spe_adma_desc_slot *desc,
				int dst_cnt, int src_cnt)
{
	struct dma_cdb *hw_desc;
	struct ppc440spe_adma_desc_slot *iter;
	int i = 0;
	u8 dopc = (dst_cnt == 2) ? DMA_CDB_OPC_MULTICAST :
				   DMA_CDB_OPC_MV_SG1_SG2;

	/*
	 * initialize starting from the second (or third, when two
	 * destinations are checked) CDB in the chain; the leading CDBs are
	 * initialized elsewhere
	 */
	iter = list_first_entry(&desc->group_list,
				struct ppc440spe_adma_desc_slot, chain_node);
	iter = list_entry(iter->chain_node.next,
			  struct ppc440spe_adma_desc_slot, chain_node);

	if (dst_cnt > 1) {
		iter = list_entry(iter->chain_node.next,
				  struct ppc440spe_adma_desc_slot, chain_node);
	}

	list_for_each_entry_from(iter, &desc->group_list, chain_node) {
		hw_desc = iter->hw_desc;
		memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
		iter->src_cnt = 0;
		iter->dst_cnt = 0;

		/*
		 * the first src_cnt CDBs accumulate the sources; the
		 * CDBs after them run the DCHECK128 compares
		 */
		if (i++ < src_cnt)
			hw_desc->opc = dopc;
		else
			hw_desc->opc = DMA_CDB_OPC_DCHECK128;

		if (likely(!list_is_last(&iter->chain_node,
					 &desc->group_list))) {
			/* set 'next' pointer */
			iter->hw_next = list_entry(iter->chain_node.next,
					struct ppc440spe_adma_desc_slot,
					chain_node);
		} else {
			/*
			 * this is the last descriptor in the chain: always
			 * interrupt here, the check result must be collected
			 */
			iter->hw_next = NULL;
			set_bit(PPC440SPE_DESC_INT, &iter->flags);
		}
	}
	desc->src_cnt = src_cnt;
	desc->dst_cnt = dst_cnt;
}

/**
 * ppc440spe_desc_init_memcpy - initialize the descriptor for a MEMCPY
 * operation
 */
static void ppc440spe_desc_init_memcpy(struct ppc440spe_adma_desc_slot *desc,
				       unsigned long flags)
{
	struct dma_cdb *hw_desc = desc->hw_desc;

	memset(desc->hw_desc, 0, sizeof(struct dma_cdb));
	desc->hw_next = NULL;
	desc->src_cnt = 1;
	desc->dst_cnt = 1;

	if (flags & DMA_PREP_INTERRUPT)
		set_bit(PPC440SPE_DESC_INT, &desc->flags);
	else
		clear_bit(PPC440SPE_DESC_INT, &desc->flags);

	hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
}

/**
 * ppc440spe_desc_set_src_addr - set a source address into the descriptor
 */
static void ppc440spe_desc_set_src_addr(struct ppc440spe_adma_desc_slot *desc,
					struct ppc440spe_adma_chan *chan,
					int src_idx, dma_addr_t addrh,
					dma_addr_t addrl)
{
	struct dma_cdb *dma_hw_desc;
	struct xor_cb *xor_hw_desc;
	phys_addr_t addr64, tmplow, tmphi;

	switch (chan->device->id) {
	case PPC440SPE_DMA0_ID:
	case PPC440SPE_DMA1_ID:
		if (!addrh) {
			addr64 = addrl;
			tmphi = (addr64 >> 32);
			tmplow = (addr64 & 0xFFFFFFFF);
		} else {
			tmphi = addrh;
			tmplow = addrl;
		}
		dma_hw_desc = desc->hw_desc;
		dma_hw_desc->sg1l = cpu_to_le32((u32)tmplow);
		dma_hw_desc->sg1u |= cpu_to_le32((u32)tmphi);
		break;
	case PPC440SPE_XOR_ID:
		xor_hw_desc = desc->hw_desc;
		xor_hw_desc->ops[src_idx].l = addrl;
		xor_hw_desc->ops[src_idx].h |= addrh;
		break;
	}
}

/**
 * ppc440spe_desc_set_src_mult - set a source multiplier into the descriptor
 */
static void ppc440spe_desc_set_src_mult(struct ppc440spe_adma_desc_slot *desc,
					struct ppc440spe_adma_chan *chan,
					u32 mult_index, int sg_index,
					unsigned char mult_value)
{
	struct dma_cdb *dma_hw_desc;
	struct xor_cb *xor_hw_desc;
	u32 *psgu;

	switch (chan->device->id) {
	case PPC440SPE_DMA0_ID:
	case PPC440SPE_DMA1_ID:
		dma_hw_desc = desc->hw_desc;

		switch (sg_index) {
		/*
		 * for RXOR operations the multiplier goes into the upper
		 * word of the source scatter-gather address
		 */
		case DMA_CDB_SG_SRC:
			psgu = &dma_hw_desc->sg1u;
			break;
		/*
		 * for WXOR operations it goes into the upper word of one
		 * of the destination scatter-gather addresses
		 */
		case DMA_CDB_SG_DST1:
			psgu = &dma_hw_desc->sg2u;
			break;
		case DMA_CDB_SG_DST2:
			psgu = &dma_hw_desc->sg3u;
			break;
		default:
			BUG();
		}

		*psgu |= cpu_to_le32(mult_value << mult_index);
		break;
	case PPC440SPE_XOR_ID:
		xor_hw_desc = desc->hw_desc;
		break;
	default:
		BUG();
	}
}

/**
 * ppc440spe_desc_set_dest_addr - set a destination address into the
 * descriptor
 */
static void ppc440spe_desc_set_dest_addr(struct ppc440spe_adma_desc_slot *desc,
					 struct ppc440spe_adma_chan *chan,
					 dma_addr_t addrh, dma_addr_t addrl,
					 u32 dst_idx)
{
	struct dma_cdb *dma_hw_desc;
	struct xor_cb *xor_hw_desc;
	phys_addr_t addr64, tmphi, tmplow;
	u32 *psgu, *psgl;

	switch (chan->device->id) {
	case PPC440SPE_DMA0_ID:
	case PPC440SPE_DMA1_ID:
		if (!addrh) {
			addr64 = addrl;
			tmphi = (addr64 >> 32);
			tmplow = (addr64 & 0xFFFFFFFF);
		} else {
			tmphi = addrh;
			tmplow = addrl;
		}
		dma_hw_desc = desc->hw_desc;

		psgu = dst_idx ? &dma_hw_desc->sg3u : &dma_hw_desc->sg2u;
		psgl = dst_idx ? &dma_hw_desc->sg3l : &dma_hw_desc->sg2l;

		*psgl = cpu_to_le32((u32)tmplow);
		*psgu |= cpu_to_le32((u32)tmphi);
		break;
	case PPC440SPE_XOR_ID:
		xor_hw_desc = desc->hw_desc;
		xor_hw_desc->cbtal = addrl;
		xor_hw_desc->cbtah |= addrh;
		break;
	}
}

/**
 * ppc440spe_desc_set_byte_count - set the byte count into the descriptor
 */
static void ppc440spe_desc_set_byte_count(struct ppc440spe_adma_desc_slot *desc,
					  struct ppc440spe_adma_chan *chan,
					  u32 byte_count)
{
	struct dma_cdb *dma_hw_desc;
	struct xor_cb *xor_hw_desc;

	switch (chan->device->id) {
	case PPC440SPE_DMA0_ID:
	case PPC440SPE_DMA1_ID:
		dma_hw_desc = desc->hw_desc;
		dma_hw_desc->cnt = cpu_to_le32(byte_count);
		break;
	case PPC440SPE_XOR_ID:
		xor_hw_desc = desc->hw_desc;
		xor_hw_desc->cbbc = byte_count;
		break;
	}
}

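/**
 * ppc440spe_desc_set_rxor_block_size - set the RXOR block size; this is a
 * global setting (a DCR register write), not a per-descriptor one.
 */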
static inline void ppc440spe_desc_set_rxor_block_size(u32 byte_count)
{
	/*
	 * the RXOR block size is shared by the DMA0 and DMA1 engines and
	 * is configured through the MQ0_CF2H DCR
	 */
	dcr_write(ppc440spe_mq_dcr_host, DCRN_MQ0_CF2H, byte_count);
}

/**
 * ppc440spe_desc_set_dcheck - set the DCHECK pattern into the descriptor
 */
static void ppc440spe_desc_set_dcheck(struct ppc440spe_adma_desc_slot *desc,
				      struct ppc440spe_adma_chan *chan,
				      u8 *qword)
{
	struct dma_cdb *dma_hw_desc;

	switch (chan->device->id) {
	case PPC440SPE_DMA0_ID:
	case PPC440SPE_DMA1_ID:
		dma_hw_desc = desc->hw_desc;
		iowrite32(qword[0], &dma_hw_desc->sg3l);
		iowrite32(qword[4], &dma_hw_desc->sg3u);
		iowrite32(qword[8], &dma_hw_desc->sg2l);
		iowrite32(qword[12], &dma_hw_desc->sg2u);
		break;
	default:
		BUG();
	}
}

/**
 * ppc440spe_xor_set_link - set the hardware link between two XOR CBs
 */
static void ppc440spe_xor_set_link(struct ppc440spe_adma_desc_slot *prev_desc,
				   struct ppc440spe_adma_desc_slot *next_desc)
{
	struct xor_cb *xor_hw_desc = prev_desc->hw_desc;

	if (unlikely(!next_desc || !(next_desc->phys))) {
		printk(KERN_ERR "%s: next_desc=0x%p; next_desc->phys=0x%llx\n",
		       __func__, next_desc,
		       next_desc ? next_desc->phys : 0);
		BUG();
	}

	xor_hw_desc->cbs = 0;
	xor_hw_desc->cblal = next_desc->phys;
	xor_hw_desc->cblah = 0;
	xor_hw_desc->cbc |= XOR_CBCR_LNK_BIT;
}

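/**
 * ppc440spe_desc_set_link - set the software (and, when safe, hardware)
 * link between two descriptors. For the XOR engine the hardware link is
 * only written when prev_desc is not the last submitted CB; otherwise
 * linking is deferred to ppc440spe_chan_append().
 */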
static void ppc440spe_desc_set_link(struct ppc440spe_adma_chan *chan,
				    struct ppc440spe_adma_desc_slot *prev_desc,
				    struct ppc440spe_adma_desc_slot *next_desc)
{
	unsigned long flags;
	struct ppc440spe_adma_desc_slot *tail = next_desc;

	if (unlikely(!prev_desc || !next_desc ||
		(prev_desc->hw_next && prev_desc->hw_next != next_desc))) {
		/*
		 * the link is not valid: either prev or next is missing,
		 * or prev already links to a different descriptor
		 */
		printk(KERN_ERR "%s: prev_desc=0x%p; next_desc=0x%p; "
			"prev->hw_next=0x%p\n", __func__, prev_desc,
			next_desc, prev_desc ? prev_desc->hw_next : 0);
		BUG();
	}

	local_irq_save(flags);

	/* do s/w chaining both for DMA and XOR descriptors */
	prev_desc->hw_next = next_desc;

	switch (chan->device->id) {
	case PPC440SPE_DMA0_ID:
	case PPC440SPE_DMA1_ID:
		break;
	case PPC440SPE_XOR_ID:
		/* bind the descriptor to the chain */
		while (tail->hw_next)
			tail = tail->hw_next;
		xor_last_linked = tail;

		if (prev_desc == xor_last_submit)
			/* do not link to the last submitted CB */
			break;
		ppc440spe_xor_set_link(prev_desc, next_desc);
		break;
	}

	local_irq_restore(flags);
}

/**
 * ppc440spe_desc_get_link - get the physical address of the linked
 * descriptor, or 0 if there is none
 */
static inline u32 ppc440spe_desc_get_link(struct ppc440spe_adma_desc_slot *desc,
					  struct ppc440spe_adma_chan *chan)
{
	if (!desc->hw_next)
		return 0;

	return desc->hw_next->phys;
}

/**
 * ppc440spe_desc_is_aligned - check the alignment of a descriptor's index
 */
static inline int ppc440spe_desc_is_aligned(
	struct ppc440spe_adma_desc_slot *desc, int num_slots)
{
	return (desc->idx & (num_slots - 1)) ? 0 : 1;
}

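/**
 * ppc440spe_chan_xor_slot_count - compute the number of descriptor slots
 * needed for an XOR operation; each XOR CB handles up to XOR_MAX_OPS
 * sources.
 */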
static int ppc440spe_chan_xor_slot_count(size_t len, int src_cnt,
					 int *slots_per_op)
{
	int slot_cnt;

	/* each XOR descriptor provides up to XOR_MAX_OPS sources */
	slot_cnt = *slots_per_op = (src_cnt + XOR_MAX_OPS - 1)/XOR_MAX_OPS;

	if (likely(len <= PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT))
		return slot_cnt;

	printk(KERN_ERR "%s: len %zu > max %d !!\n",
		__func__, len, PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT);
	BUG();
	return slot_cnt;
}

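/**
 * ppc440spe_dma2_pq_slot_count - compute the number of XOR CBs needed for
 * a PQ operation on the XOR core. Walks the source list and counts the
 * RXOR address regions the sources form; the sources must be arranged in
 * a recognizable RXOR pattern (consecutive, ascending or descending, or
 * strided by 2*len/3*len), otherwise this BUGs.
 */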
static int ppc440spe_dma2_pq_slot_count(dma_addr_t *srcs,
					int src_cnt, size_t len)
{
	signed long long order = 0;
	int state = 0;
	int addr_count = 0;
	int i;

	for (i = 1; i < src_cnt; i++) {
		dma_addr_t cur_addr = srcs[i];
		dma_addr_t old_addr = srcs[i-1];

		switch (state) {
		case 0:
			if (cur_addr == old_addr + len) {
				/* direct RXOR */
				order = 1;
				state = 1;
				if (i == src_cnt-1)
					addr_count++;
			} else if (old_addr == cur_addr + len) {
				/* reverse RXOR */
				order = -1;
				state = 1;
				if (i == src_cnt-1)
					addr_count++;
			} else {
				state = 3;
			}
			break;
		case 1:
			if (i == src_cnt-2 || (order == -1
				&& cur_addr != old_addr - len)) {
				order = 0;
				state = 0;
				addr_count++;
			} else if (cur_addr == old_addr + len*order) {
				state = 2;
				if (i == src_cnt-1)
					addr_count++;
			} else if (cur_addr == old_addr + 2*len) {
				state = 2;
				if (i == src_cnt-1)
					addr_count++;
			} else if (cur_addr == old_addr + 3*len) {
				state = 2;
				if (i == src_cnt-1)
					addr_count++;
			} else {
				order = 0;
				state = 0;
				addr_count++;
			}
			break;
		case 2:
			order = 0;
			state = 0;
			addr_count++;
			break;
		}
		if (state == 3)
			break;
	}
	if (src_cnt <= 1 || (state != 1 && state != 2)) {
		pr_err("%s: src_cnt=%d, state=%d, addr_count=%d, order=%lld\n",
			__func__, src_cnt, state, addr_count, order);
		for (i = 0; i < src_cnt; i++)
			pr_err("\t[%d] 0x%llx\n", i, srcs[i]);
		BUG();
	}

	return (addr_count + XOR_MAX_OPS - 1) / XOR_MAX_OPS;
}

/******************************************************************************
 * ADMA channel low-level routines
 ******************************************************************************/

static u32
ppc440spe_chan_get_current_descriptor(struct ppc440spe_adma_chan *chan);
static void ppc440spe_chan_append(struct ppc440spe_adma_chan *chan);

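/**
 * ppc440spe_adma_device_clear_eot_status - interrupt acknowledge: pop the
 * completed CDB addresses from the DMA status FIFO (clearing their opcodes
 * and latching any P/Q check results), clear error status, and for the XOR
 * engine restart the chain or refetch it if needed.
 */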
static void ppc440spe_adma_device_clear_eot_status(
					struct ppc440spe_adma_chan *chan)
{
	struct dma_regs *dma_reg;
	struct xor_regs *xor_reg;
	u8 *p = chan->device->dma_desc_pool_virt;
	struct dma_cdb *cdb;
	u32 rv, i;

	switch (chan->device->id) {
	case PPC440SPE_DMA0_ID:
	case PPC440SPE_DMA1_ID:
		/* read the FIFO to ack */
		dma_reg = chan->device->dma_reg;
		while ((rv = ioread32(&dma_reg->csfpl))) {
			i = rv & DMA_CDB_ADDR_MSK;
			/*
			 * physical and virtual addresses of a CDB in the
			 * pool have the same offset, so the virtual CDB can
			 * be derived from the popped physical address
			 */
			cdb = (struct dma_cdb *)&p[i -
			    (u32)chan->device->dma_desc_pool];

			/*
			 * Clear the opcode of the completed CDB; this is
			 * how completion of individual CDBs is tracked
			 */
			cdb->opc = 0;

			if (test_bit(PPC440SPE_RXOR_RUN,
			    &ppc440spe_rxor_state)) {
				/*
				 * probably this is a completed RXOR op:
				 * the cued source base flag identifies it
				 */
				if (le32_to_cpu(cdb->sg1u) &
				    DMA_CUED_XOR_BASE) {
					/* this is an RXOR */
					clear_bit(PPC440SPE_RXOR_RUN,
						  &ppc440spe_rxor_state);
				}
			}

			if (rv & DMA_CDB_STATUS_MSK) {
				/*
				 * ZeroSum check failed: update the status
				 * of the corresponding descriptor
				 */
				struct ppc440spe_adma_desc_slot *iter;
				dma_addr_t phys = rv & ~DMA_CDB_MSK;

				list_for_each_entry(iter, &chan->chain,
				    chain_node) {
					if (iter->phys == phys)
						break;
				}
				/*
				 * if the corresponding slot can't be found
				 * it's a bug
				 */
				BUG_ON(&iter->chain_node == &chan->chain);

				if (iter->xor_check_result) {
					if (test_bit(PPC440SPE_DESC_PCHECK,
						     &iter->flags)) {
						*iter->xor_check_result |=
							SUM_CHECK_P_RESULT;
					} else
					if (test_bit(PPC440SPE_DESC_QCHECK,
						     &iter->flags)) {
						*iter->xor_check_result |=
							SUM_CHECK_Q_RESULT;
					} else
						BUG();
				}
			}
		}

		rv = ioread32(&dma_reg->dsts);
		if (rv) {
			pr_err("DMA%d err status: 0x%x\n",
			       chan->device->id, rv);
			/* write back to clear */
			iowrite32(rv, &dma_reg->dsts);
		}
		break;
	case PPC440SPE_XOR_ID:
		/* reset the status bits to ack */
		xor_reg = chan->device->xor_reg;
		rv = ioread32be(&xor_reg->sr);
		iowrite32be(rv, &xor_reg->sr);

		if (rv & (XOR_IE_ICBIE_BIT|XOR_IE_ICIE_BIT|XOR_IE_RPTIE_BIT)) {
			if (rv & XOR_IE_RPTIE_BIT) {
				/*
				 * Read PLB Timeout Error.
				 * Try to resubmit the CB
				 */
				u32 val = ioread32be(&xor_reg->ccbalr);

				iowrite32be(val, &xor_reg->cblalr);

				val = ioread32be(&xor_reg->crsr);
				iowrite32be(val | XOR_CRSR_XAE_BIT,
					    &xor_reg->crsr);
			} else
				pr_err("XOR ERR 0x%x status\n", rv);
			break;
		}

		/*
		 * if the XORcore is idle, but there are unprocessed CBs
		 * then refetch the s/w chain here
		 */
		if (!(ioread32be(&xor_reg->sr) & XOR_SR_XCP_BIT) &&
		    do_xor_refetch)
			ppc440spe_chan_append(chan);
		break;
	}
}

/**
 * ppc440spe_chan_is_busy - get the channel busy state
 */
static int ppc440spe_chan_is_busy(struct ppc440spe_adma_chan *chan)
{
	struct dma_regs *dma_reg;
	struct xor_regs *xor_reg;
	int busy = 0;

	switch (chan->device->id) {
	case PPC440SPE_DMA0_ID:
	case PPC440SPE_DMA1_ID:
		dma_reg = chan->device->dma_reg;
		/*
		 * if the command FIFO head and tail pointers differ, or the
		 * command and status tails differ, the channel is busy
		 */
		if (ioread16(&dma_reg->cpfhp) != ioread16(&dma_reg->cpftp) ||
		    ioread16(&dma_reg->cpftp) != ioread16(&dma_reg->csftp))
			busy = 1;
		break;
	case PPC440SPE_XOR_ID:
		/* use the dedicated status bit for the XORcore */
		xor_reg = chan->device->xor_reg;
		busy = (ioread32be(&xor_reg->sr) & XOR_SR_XCP_BIT) ? 1 : 0;
		break;
	}

	return busy;
}

/**
 * ppc440spe_chan_set_first_xor_descriptor - initialize the XOR engine
 * chain with its first CDB
 */
static void ppc440spe_chan_set_first_xor_descriptor(
				struct ppc440spe_adma_chan *chan,
				struct ppc440spe_adma_desc_slot *next_desc)
{
	struct xor_regs *xor_reg = chan->device->xor_reg;

	if (ioread32be(&xor_reg->sr) & XOR_SR_XCP_BIT)
		printk(KERN_INFO "%s: Warn: XORcore is running "
			"when try to set the first CDB!\n",
			__func__);

	xor_last_submit = xor_last_linked = next_desc;

	iowrite32be(XOR_CRSR_64BA_BIT, &xor_reg->crsr);

	iowrite32be(next_desc->phys, &xor_reg->cblalr);
	iowrite32be(0, &xor_reg->cblahr);
	iowrite32be(ioread32be(&xor_reg->cbcr) | XOR_CBCR_LNK_BIT,
		    &xor_reg->cbcr);

	chan->hw_chain_inited = 1;
}

/**
 * ppc440spe_dma_put_desc - put a DMA0/DMA1 descriptor into the command
 * FIFO; the low bits of the CDB address carry control flags such as
 * DMA_CDB_NO_INT
 */
static void ppc440spe_dma_put_desc(struct ppc440spe_adma_chan *chan,
				   struct ppc440spe_adma_desc_slot *desc)
{
	u32 pcdb;
	struct dma_regs *dma_reg = chan->device->dma_reg;

	pcdb = desc->phys;
	if (!test_bit(PPC440SPE_DESC_INT, &desc->flags))
		pcdb |= DMA_CDB_NO_INT;

	chan_last_sub[chan->device->id] = desc;

	ADMA_LL_DBG(print_cb(chan, desc->hw_desc));

	iowrite32(pcdb, &dma_reg->cpfpl);
}

/**
 * ppc440spe_chan_append - update the hardware chain with the queued
 * descriptors
 */
static void ppc440spe_chan_append(struct ppc440spe_adma_chan *chan)
{
	struct xor_regs *xor_reg;
	struct ppc440spe_adma_desc_slot *iter;
	struct xor_cb *xcb;
	u32 cur_desc;
	unsigned long flags;

	local_irq_save(flags);

	switch (chan->device->id) {
	case PPC440SPE_DMA0_ID:
	case PPC440SPE_DMA1_ID:
		cur_desc = ppc440spe_chan_get_current_descriptor(chan);

		if (likely(cur_desc)) {
			iter = chan_last_sub[chan->device->id];
			BUG_ON(!iter);
		} else {
			/* first peer */
			iter = chan_first_cdb[chan->device->id];
			BUG_ON(!iter);
			ppc440spe_dma_put_desc(chan, iter);
			chan->hw_chain_inited = 1;
		}

		/* is there something new to append */
		if (!iter->hw_next)
			break;

		/* flush descriptors from the s/w queue to the FIFO */
		list_for_each_entry_continue(iter, &chan->chain, chain_node) {
			ppc440spe_dma_put_desc(chan, iter);
			if (!iter->hw_next)
				break;
		}
		break;
	case PPC440SPE_XOR_ID:
		/* update h/w links and refetch */
		if (!xor_last_submit->hw_next)
			break;

		xor_reg = chan->device->xor_reg;
		/*
		 * the last linked CDB has to generate an interrupt so that
		 * the next lists can be appended to h/w regardless of the
		 * XOR engine state at the moment of appending
		 */
		xcb = xor_last_linked->hw_desc;
		xcb->cbc |= XOR_CBCR_CBCE_BIT;

		if (!(ioread32be(&xor_reg->sr) & XOR_SR_XCP_BIT)) {
			/* XORcore is idle: refetch now */
			do_xor_refetch = 0;
			ppc440spe_xor_set_link(xor_last_submit,
				xor_last_submit->hw_next);

			ADMA_LL_DBG(print_cb_list(chan,
				xor_last_submit->hw_next));

			xor_last_submit = xor_last_linked;
			iowrite32be(ioread32be(&xor_reg->crsr) |
				    XOR_CRSR_RCBE_BIT | XOR_CRSR_64BA_BIT,
				    &xor_reg->crsr);
		} else {
			/* XORcore is running: refetch later from the handler */
			do_xor_refetch = 1;
		}

		break;
	}

	local_irq_restore(flags);
}

/**
 * ppc440spe_chan_get_current_descriptor - get the currently executed
 * descriptor
 */
static u32
ppc440spe_chan_get_current_descriptor(struct ppc440spe_adma_chan *chan)
{
	struct dma_regs *dma_reg;
	struct xor_regs *xor_reg;

	if (unlikely(!chan->hw_chain_inited))
		/* h/w descriptor chain is not initialized yet */
		return 0;

	switch (chan->device->id) {
	case PPC440SPE_DMA0_ID:
	case PPC440SPE_DMA1_ID:
		dma_reg = chan->device->dma_reg;
		return ioread32(&dma_reg->acpl) & (~DMA_CDB_MSK);
	case PPC440SPE_XOR_ID:
		xor_reg = chan->device->xor_reg;
		return ioread32be(&xor_reg->ccbalr);
	}
	return 0;
}

/**
 * ppc440spe_chan_run - enable the channel
 */
static void ppc440spe_chan_run(struct ppc440spe_adma_chan *chan)
{
	struct xor_regs *xor_reg;

	switch (chan->device->id) {
	case PPC440SPE_DMA0_ID:
	case PPC440SPE_DMA1_ID:
		/* the DMA engines are always enabled, nothing to do here */
		break;
	case PPC440SPE_XOR_ID:
		/* start the XORcore */
		xor_reg = chan->device->xor_reg;

		iowrite32be(XOR_CRSR_64BA_BIT | XOR_CRSR_XAE_BIT,
			    &xor_reg->crsr);
		break;
	}
}

static void ppc440spe_chan_start_null_xor(struct ppc440spe_adma_chan *chan);
static int ppc440spe_adma_alloc_chan_resources(struct dma_chan *chan);

static dma_cookie_t
ppc440spe_adma_tx_submit(struct dma_async_tx_descriptor *tx);

static void ppc440spe_adma_set_dest(struct ppc440spe_adma_desc_slot *tx,
				    dma_addr_t addr, int index);
static void
ppc440spe_adma_memcpy_xor_set_src(struct ppc440spe_adma_desc_slot *tx,
				  dma_addr_t addr, int index);

static void
ppc440spe_adma_pq_set_dest(struct ppc440spe_adma_desc_slot *tx,
			   dma_addr_t *paddr, unsigned long flags);
static void
ppc440spe_adma_pq_set_src(struct ppc440spe_adma_desc_slot *tx,
			  dma_addr_t addr, int index);
static void
ppc440spe_adma_pq_set_src_mult(struct ppc440spe_adma_desc_slot *tx,
			       unsigned char mult, int index, int dst_pos);
static void
ppc440spe_adma_pqzero_sum_set_dest(struct ppc440spe_adma_desc_slot *tx,
				   dma_addr_t paddr, dma_addr_t qaddr);

static struct page *ppc440spe_rxor_srcs[32];

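/**
 * ppc440spe_can_rxor - check whether the source addresses form a layout
 * (consecutive pages, ascending or descending, or strided by 2*len/3*len)
 * that the XOR core can process with the RXOR primitive.
 */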
static int ppc440spe_can_rxor(struct page **srcs, int src_cnt, size_t len)
{
	int i, order = 0, state = 0;
	int idx = 0;

	if (unlikely(!(src_cnt > 1)))
		return 0;

	BUG_ON(src_cnt > ARRAY_SIZE(ppc440spe_rxor_srcs));

	/* Skip holes in the source list before checking */
	for (i = 0; i < src_cnt; i++) {
		if (!srcs[i])
			continue;
		ppc440spe_rxor_srcs[idx++] = srcs[i];
	}
	src_cnt = idx;

	for (i = 1; i < src_cnt; i++) {
		char *cur_addr = page_address(ppc440spe_rxor_srcs[i]);
		char *old_addr = page_address(ppc440spe_rxor_srcs[i - 1]);

		switch (state) {
		case 0:
			if (cur_addr == old_addr + len) {
				/* direct RXOR */
				order = 1;
				state = 1;
			} else if (old_addr == cur_addr + len) {
				/* reverse RXOR */
				order = -1;
				state = 1;
			} else
				goto out;
			break;
		case 1:
			if ((i == src_cnt - 2) ||
			    (order == -1 && cur_addr != old_addr - len)) {
				order = 0;
				state = 0;
			} else if ((cur_addr == old_addr + len * order) ||
				   (cur_addr == old_addr + 2 * len) ||
				   (cur_addr == old_addr + 3 * len)) {
				state = 2;
			} else {
				order = 0;
				state = 0;
			}
			break;
		case 2:
			order = 0;
			state = 0;
			break;
		}
	}

out:
	if (state == 1 || state == 2)
		return 1;

	return 0;
}

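/**
 * ppc440spe_adma_estimate - rank how well this channel fits the requested
 * operation: -1 if it cannot do it at all (e.g. PQ while RAID-6 support
 * is not enabled), bigger is better; an idle channel gets a bonus point.
 */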
static int ppc440spe_adma_estimate(struct dma_chan *chan,
	enum dma_transaction_type cap, struct page **dst_lst, int dst_cnt,
	struct page **src_lst, int src_cnt, size_t src_sz)
{
	int ef = 1;

	if (cap == DMA_PQ || cap == DMA_PQ_VAL) {
		/*
		 * If the RAID-6 capabilities were not activated don't try
		 * to use them
		 */
		if (unlikely(!ppc440spe_r6_enabled))
			return -1;
	}

	/*
	 * PQ on the XOR core is only worthwhile when the sources suit the
	 * RXOR primitive; otherwise prefer the DMA0/DMA1 engines
	 */
	if (cap == DMA_PQ && chan->chan_id == PPC440SPE_XOR_ID) {

		if (dst_cnt == 1 && src_cnt == 2 && dst_lst[0] == src_lst[1])
			ef = 0;
		else if (ppc440spe_can_rxor(src_lst, src_cnt, src_sz))
			ef = 3; /* prefer RXOR even over an idle DMA0/DMA1 */
		else
			ef = 0;
	}

	/* an idle channel gets a bonus point */
	if (likely(ef) &&
	    !ppc440spe_chan_is_busy(to_ppc440spe_adma_chan(chan)))
		ef++;

	return ef;
}

struct dma_chan *
ppc440spe_async_tx_find_best_channel(enum dma_transaction_type cap,
	struct page **dst_lst, int dst_cnt, struct page **src_lst,
	int src_cnt, size_t src_sz)
{
	struct dma_chan *best_chan = NULL;
	struct ppc_dma_chan_ref *ref;
	int best_rank = -1;

	if (unlikely(!src_sz))
		return NULL;
	if (src_sz > PAGE_SIZE) {
		/*
		 * should a user of the api ever pass > PAGE_SIZE requests
		 * we sort out cases where temporary page-sized buffers
		 * are used
		 */
		switch (cap) {
		case DMA_PQ:
			if (src_cnt == 1 && dst_lst[1] == src_lst[0])
				return NULL;
			if (src_cnt == 2 && dst_lst[1] == src_lst[1])
				return NULL;
			break;
		case DMA_PQ_VAL:
		case DMA_XOR_VAL:
			return NULL;
		default:
			break;
		}
	}

	list_for_each_entry(ref, &ppc440spe_adma_chan_list, node) {
		if (dma_has_cap(cap, ref->chan->device->cap_mask)) {
			int rank;

			rank = ppc440spe_adma_estimate(ref->chan, cap, dst_lst,
					dst_cnt, src_lst, src_cnt, src_sz);
			if (rank > best_rank) {
				best_rank = rank;
				best_chan = ref->chan;
			}
		}
	}

	return best_chan;
}
EXPORT_SYMBOL_GPL(ppc440spe_async_tx_find_best_channel);

/**
 * ppc440spe_get_group_entry - get a group entry by its index
 */
static struct ppc440spe_adma_desc_slot *
ppc440spe_get_group_entry(struct ppc440spe_adma_desc_slot *tdesc, u32 entry_idx)
{
	struct ppc440spe_adma_desc_slot *iter = tdesc->group_head;
	int i = 0;

	if (entry_idx < 0 || entry_idx >= (tdesc->src_cnt + tdesc->dst_cnt)) {
		printk(KERN_ERR "%s: entry_idx %d, src_cnt %d, dst_cnt %d\n",
			__func__, entry_idx, tdesc->src_cnt, tdesc->dst_cnt);
		BUG();
	}

	list_for_each_entry(iter, &tdesc->group_list, chain_node) {
		if (i++ == entry_idx)
			break;
	}
	return iter;
}

/**
 * ppc440spe_adma_free_slots - flag descriptor slots for reuse; a slot with
 * slots_per_op == 0 is considered free
 */
static void ppc440spe_adma_free_slots(struct ppc440spe_adma_desc_slot *slot,
				      struct ppc440spe_adma_chan *chan)
{
	int stride = slot->slots_per_op;

	while (stride--) {
		slot->slots_per_op = 0;
		slot = list_entry(slot->slot_node.next,
				  struct ppc440spe_adma_desc_slot,
				  slot_node);
	}
}

/**
 * ppc440spe_adma_run_tx_complete_actions - call the callback on a
 * completed descriptor and run any dependent transactions
 */
static dma_cookie_t ppc440spe_adma_run_tx_complete_actions(
		struct ppc440spe_adma_desc_slot *desc,
		struct ppc440spe_adma_chan *chan,
		dma_cookie_t cookie)
{
	BUG_ON(desc->async_tx.cookie < 0);
	if (desc->async_tx.cookie > 0) {
		cookie = desc->async_tx.cookie;
		desc->async_tx.cookie = 0;

		dma_descriptor_unmap(&desc->async_tx);
		/*
		 * call the callback (must not sleep or submit new
		 * operations to this channel)
		 */
		dmaengine_desc_get_callback_invoke(&desc->async_tx, NULL);
	}

	/* run dependent operations */
	dma_run_dependencies(&desc->async_tx);

	return cookie;
}

/**
 * ppc440spe_adma_clean_slot - clean up a single slot if it is decided that
 * the hardware no longer needs it; returns 1 if the cleanup must stop at
 * this descriptor
 */
static int ppc440spe_adma_clean_slot(struct ppc440spe_adma_desc_slot *desc,
				     struct ppc440spe_adma_chan *chan)
{
	/*
	 * the client is allowed to attach dependent operations
	 * until 'ack' is set
	 */
	if (!async_tx_test_ack(&desc->async_tx))
		return 0;

	/*
	 * leave the last descriptor in the chain so we can append to it,
	 * and do not free a descriptor the engine is still working on
	 */
	if (list_is_last(&desc->chain_node, &chan->chain) ||
	    desc->phys == ppc440spe_chan_get_current_descriptor(chan))
		return 1;

	if (chan->device->id != PPC440SPE_XOR_ID) {
		/*
		 * the interrupt handler clears the opc field of a CDB when
		 * it completes; a CDB that still carries the DCHECK128
		 * opcode has not been processed yet, so stop the cleanup
		 * here
		 */
		struct dma_cdb *cdb = desc->hw_desc;
		if (cdb->opc == DMA_CDB_OPC_DCHECK128)
			return 1;
	}

	dev_dbg(chan->device->common.dev, "\tfree slot %llx: %d stride: %d\n",
		desc->phys, desc->idx, desc->slots_per_op);

	list_del(&desc->chain_node);
	ppc440spe_adma_free_slots(desc, chan);
	return 0;
}

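/**
 * __ppc440spe_adma_slot_cleanup - walk the submitted chain, run completion
 * actions for the descriptors the hardware has passed, free their slots
 * and advance the channel's completed cookie. Caller must hold chan->lock.
 */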
static void __ppc440spe_adma_slot_cleanup(struct ppc440spe_adma_chan *chan)
{
	struct ppc440spe_adma_desc_slot *iter, *_iter, *group_start = NULL;
	dma_cookie_t cookie = 0;
	u32 current_desc = ppc440spe_chan_get_current_descriptor(chan);
	int busy = ppc440spe_chan_is_busy(chan);
	int seen_current = 0, slot_cnt = 0, slots_per_op = 0;

	dev_dbg(chan->device->common.dev, "ppc440spe adma%d: %s\n",
		chan->device->id, __func__);

	if (!current_desc) {
		/* There were no transactions yet, so nothing to clean */
		return;
	}

	/*
	 * free completed slots from the chain starting with the oldest
	 * descriptor
	 */
	list_for_each_entry_safe(iter, _iter, &chan->chain,
					chain_node) {
		dev_dbg(chan->device->common.dev, "\tcookie: %d slot: %d "
		    "busy: %d this_desc: %#llx next_desc: %#x "
		    "cur: %#x ack: %d\n",
		    iter->async_tx.cookie, iter->idx, busy, iter->phys,
		    ppc440spe_desc_get_link(iter, chan), current_desc,
		    async_tx_test_ack(&iter->async_tx));
		prefetch(_iter);
		prefetch(&_iter->async_tx);

		/*
		 * do not advance past the current descriptor loaded into
		 * the hardware channel; subsequent descriptors are either
		 * in process or have not been submitted
		 */
		if (seen_current)
			break;

		/*
		 * stop the search if we reach the current descriptor and
		 * the channel is busy, or if it appears that the current
		 * descriptor is still being chained to (has a link)
		 */
		if (iter->phys == current_desc) {
			BUG_ON(seen_current++);
			if (busy || ppc440spe_desc_get_link(iter, chan)) {
				/* not all transactions are done */
				break;
			}
		}

		/* detect the start of a group transaction */
		if (!slot_cnt && !slots_per_op) {
			slot_cnt = iter->slot_cnt;
			slots_per_op = iter->slots_per_op;
			if (slot_cnt <= slots_per_op) {
				slot_cnt = 0;
				slots_per_op = 0;
			}
		}

		if (slot_cnt) {
			if (!group_start)
				group_start = iter;
			slot_cnt -= slots_per_op;
		}

		/* all the members of a group are complete */
		if (slots_per_op != 0 && slot_cnt == 0) {
			struct ppc440spe_adma_desc_slot *grp_iter, *_grp_iter;
			int end_of_chain = 0;

			/* clean up the group */
			slot_cnt = group_start->slot_cnt;
			grp_iter = group_start;
			list_for_each_entry_safe_from(grp_iter, _grp_iter,
				&chan->chain, chain_node) {

				cookie = ppc440spe_adma_run_tx_complete_actions(
					grp_iter, chan, cookie);

				slot_cnt -= slots_per_op;
				end_of_chain = ppc440spe_adma_clean_slot(
				    grp_iter, chan);
				if (end_of_chain && slot_cnt) {
					/* Should wait for ZeroSum completion */
					if (cookie > 0)
						chan->common.completed_cookie = cookie;
					return;
				}

				if (slot_cnt == 0 || end_of_chain)
					break;
			}

			/* the group should be complete at this point */
			BUG_ON(slot_cnt);

			slots_per_op = 0;
			group_start = NULL;
			if (end_of_chain)
				break;
			else
				continue;
		} else if (slots_per_op) /* wait for group completion */
			continue;

		cookie = ppc440spe_adma_run_tx_complete_actions(iter, chan,
		    cookie);

		if (ppc440spe_adma_clean_slot(iter, chan))
			break;
	}

	BUG_ON(!seen_current);

	if (cookie > 0) {
		chan->common.completed_cookie = cookie;
		pr_debug("\tcompleted cookie %d\n", cookie);
	}
}

/**
 * ppc440spe_adma_tasklet - clean up after a transaction from irq context
 */
static void ppc440spe_adma_tasklet(unsigned long data)
{
	struct ppc440spe_adma_chan *chan = (struct ppc440spe_adma_chan *) data;

	spin_lock_nested(&chan->lock, SINGLE_DEPTH_NESTING);
	__ppc440spe_adma_slot_cleanup(chan);
	spin_unlock(&chan->lock);
}

/**
 * ppc440spe_adma_slot_cleanup - clean up after a transaction
 */
static void ppc440spe_adma_slot_cleanup(struct ppc440spe_adma_chan *chan)
{
	spin_lock_bh(&chan->lock);
	__ppc440spe_adma_slot_cleanup(chan);
	spin_unlock_bh(&chan->lock);
}

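/**
 * ppc440spe_adma_alloc_slots - grab num_slots contiguous free descriptor
 * slots; retries once from the start of the pool and, before failing,
 * schedules the cleanup tasklet so that completed slots get recycled.
 */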
static struct ppc440spe_adma_desc_slot *ppc440spe_adma_alloc_slots(
		struct ppc440spe_adma_chan *chan, int num_slots,
		int slots_per_op)
{
	struct ppc440spe_adma_desc_slot *iter = NULL, *_iter;
	struct ppc440spe_adma_desc_slot *alloc_start = NULL;
	struct list_head chain = LIST_HEAD_INIT(chain);
	int slots_found, retry = 0;

	BUG_ON(!num_slots || !slots_per_op);
	/*
	 * start the search from the last allocated descriptor; if a
	 * contiguous allocation can not be found, start from the beginning
	 * of the list
	 */
retry:
	slots_found = 0;
	if (retry == 0)
		iter = chan->last_used;
	else
		iter = list_entry(&chan->all_slots,
				  struct ppc440spe_adma_desc_slot,
				  slot_node);
	list_for_each_entry_safe_continue(iter, _iter, &chan->all_slots,
	    slot_node) {
		prefetch(_iter);
		prefetch(&_iter->async_tx);
		if (iter->slots_per_op) {
			slots_found = 0;
			continue;
		}

		/* start the allocation if the slot is correctly aligned */
		if (!slots_found++)
			alloc_start = iter;

		if (slots_found == num_slots) {
			struct ppc440spe_adma_desc_slot *alloc_tail = NULL;
			struct ppc440spe_adma_desc_slot *last_used = NULL;

			iter = alloc_start;
			while (num_slots) {
				int i;
				/* pre-ack all but the last descriptor */
				if (num_slots != slots_per_op)
					async_tx_ack(&iter->async_tx);

				list_add_tail(&iter->chain_node, &chain);
				alloc_tail = iter;
				iter->async_tx.cookie = 0;
				iter->hw_next = NULL;
				iter->flags = 0;
				iter->slot_cnt = num_slots;
				iter->xor_check_result = NULL;
				for (i = 0; i < slots_per_op; i++) {
					iter->slots_per_op = slots_per_op - i;
					last_used = iter;
					iter = list_entry(iter->slot_node.next,
						struct ppc440spe_adma_desc_slot,
						slot_node);
				}
				num_slots -= slots_per_op;
			}
			alloc_tail->group_head = alloc_start;
			alloc_tail->async_tx.cookie = -EBUSY;
			list_splice(&chain, &alloc_tail->group_list);
			chan->last_used = last_used;
			return alloc_tail;
		}
	}
	if (!retry++)
		goto retry;

	/* try to free some slots if the allocation fails */
	tasklet_schedule(&chan->irq_tasklet);
	return NULL;
}

/**
 * ppc440spe_adma_alloc_chan_resources - allocate pools for CDB slots
 */
static int ppc440spe_adma_alloc_chan_resources(struct dma_chan *chan)
{
	struct ppc440spe_adma_chan *ppc440spe_chan;
	struct ppc440spe_adma_desc_slot *slot = NULL;
	char *hw_desc;
	int i, db_sz;
	int init;

	ppc440spe_chan = to_ppc440spe_adma_chan(chan);
	init = ppc440spe_chan->slots_allocated ? 0 : 1;
	chan->chan_id = ppc440spe_chan->device->id;

	/* Allocate descriptor slots */
	i = ppc440spe_chan->slots_allocated;
	if (ppc440spe_chan->device->id != PPC440SPE_XOR_ID)
		db_sz = sizeof(struct dma_cdb);
	else
		db_sz = sizeof(struct xor_cb);

	for (; i < (ppc440spe_chan->device->pool_size / db_sz); i++) {
		slot = kzalloc(sizeof(struct ppc440spe_adma_desc_slot),
			       GFP_KERNEL);
		if (!slot) {
			printk(KERN_INFO "SPE ADMA Channel only initialized"
				" %d descriptor slots\n", i--);
			break;
		}

		hw_desc = (char *) ppc440spe_chan->device->dma_desc_pool_virt;
		slot->hw_desc = (void *) &hw_desc[i * db_sz];
		dma_async_tx_descriptor_init(&slot->async_tx, chan);
		slot->async_tx.tx_submit = ppc440spe_adma_tx_submit;
		INIT_LIST_HEAD(&slot->chain_node);
		INIT_LIST_HEAD(&slot->slot_node);
		INIT_LIST_HEAD(&slot->group_list);
		slot->phys = ppc440spe_chan->device->dma_desc_pool + i * db_sz;
		slot->idx = i;

		spin_lock_bh(&ppc440spe_chan->lock);
		ppc440spe_chan->slots_allocated++;
		list_add_tail(&slot->slot_node, &ppc440spe_chan->all_slots);
		spin_unlock_bh(&ppc440spe_chan->lock);
	}

	if (i && !ppc440spe_chan->last_used) {
		ppc440spe_chan->last_used =
			list_entry(ppc440spe_chan->all_slots.next,
				   struct ppc440spe_adma_desc_slot,
				   slot_node);
	}

	dev_dbg(ppc440spe_chan->device->common.dev,
		"ppc440spe adma%d: allocated %d descriptor slots\n",
		ppc440spe_chan->device->id, i);

	/* initialize the channel on first allocation */
	if (init) {
		switch (ppc440spe_chan->device->id) {
		case PPC440SPE_DMA0_ID:
		case PPC440SPE_DMA1_ID:
			ppc440spe_chan->hw_chain_inited = 0;
			/* the first DMA channel is used for the RAID-6 test */
			if (!ppc440spe_r6_tchan)
				ppc440spe_r6_tchan = ppc440spe_chan;
			break;
		case PPC440SPE_XOR_ID:
			ppc440spe_chan_start_null_xor(ppc440spe_chan);
			break;
		default:
			BUG();
		}
		ppc440spe_chan->needs_unmap = 1;
	}

	return (i > 0) ? i : -ENOMEM;
}

/**
 * ppc440spe_rxor_set_region - set an RXOR region mask into a CB source
 * operand
 */
static void ppc440spe_rxor_set_region(struct ppc440spe_adma_desc_slot *desc,
				      u8 xor_arg_no, u32 mask)
{
	struct xor_cb *xcb = desc->hw_desc;

	xcb->ops[xor_arg_no].h |= mask;
}

/**
 * ppc440spe_rxor_set_src - set an RXOR source address into a CB operand
 */
static void ppc440spe_rxor_set_src(struct ppc440spe_adma_desc_slot *desc,
				   u8 xor_arg_no, dma_addr_t addr)
{
	struct xor_cb *xcb = desc->hw_desc;

	xcb->ops[xor_arg_no].h |= DMA_CUED_XOR_BASE;
	xcb->ops[xor_arg_no].l = addr;
}

/**
 * ppc440spe_rxor_set_mult - set an RXOR multiplier into a CB operand
 */
static void ppc440spe_rxor_set_mult(struct ppc440spe_adma_desc_slot *desc,
				    u8 xor_arg_no, u8 idx, u8 mult)
{
	struct xor_cb *xcb = desc->hw_desc;

	xcb->ops[xor_arg_no].h |= mult << (DMA_CUED_MULT1_OFF + idx * 8);
}

/**
 * ppc440spe_adma_check_threshold - append to the hardware chain once the
 * number of pending operations crosses PPC440SPE_ADMA_THRESHOLD; batching
 * keeps the descriptor-fetch overhead down
 */
static void ppc440spe_adma_check_threshold(struct ppc440spe_adma_chan *chan)
{
	dev_dbg(chan->device->common.dev, "ppc440spe adma%d: pending: %d\n",
		chan->device->id, chan->pending);

	if (chan->pending >= PPC440SPE_ADMA_THRESHOLD) {
		chan->pending = 0;
		ppc440spe_chan_append(chan);
	}
}

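/**
 * ppc440spe_adma_tx_submit - assign a cookie, splice the descriptor group
 * onto the channel chain (linking it after the old tail), and append to
 * hardware once the pending count crosses the threshold.
 */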
1897 static dma_cookie_t ppc440spe_adma_tx_submit(struct dma_async_tx_descriptor *tx)
1898 {
1899 struct ppc440spe_adma_desc_slot *sw_desc;
1900 struct ppc440spe_adma_chan *chan = to_ppc440spe_adma_chan(tx->chan);
1901 struct ppc440spe_adma_desc_slot *group_start, *old_chain_tail;
1902 int slot_cnt;
1903 int slots_per_op;
1904 dma_cookie_t cookie;
1905
1906 sw_desc = tx_to_ppc440spe_adma_slot(tx);
1907
1908 group_start = sw_desc->group_head;
1909 slot_cnt = group_start->slot_cnt;
1910 slots_per_op = group_start->slots_per_op;
1911
1912 spin_lock_bh(&chan->lock);
1913 cookie = dma_cookie_assign(tx);
1914
1915 if (unlikely(list_empty(&chan->chain))) {
1916
1917 list_splice_init(&sw_desc->group_list, &chan->chain);
1918 chan_first_cdb[chan->device->id] = group_start;
1919 } else {
1920
1921 old_chain_tail = list_entry(chan->chain.prev,
1922 struct ppc440spe_adma_desc_slot,
1923 chain_node);
1924 list_splice_init(&sw_desc->group_list,
1925 &old_chain_tail->chain_node);
1926
1927 ppc440spe_desc_set_link(chan, old_chain_tail, group_start);
1928 }
1929
1930
1931 chan->pending += slot_cnt / slots_per_op;
1932 ppc440spe_adma_check_threshold(chan);
1933 spin_unlock_bh(&chan->lock);
1934
1935 dev_dbg(chan->device->common.dev,
1936 "ppc440spe adma%d: %s cookie: %d slot: %d tx %p\n",
1937 chan->device->id, __func__,
1938 sw_desc->async_tx.cookie, sw_desc->idx, sw_desc);
1939
1940 return cookie;
1941 }
1942
1943
1944
1945
1946 static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_interrupt(
1947 struct dma_chan *chan, unsigned long flags)
1948 {
1949 struct ppc440spe_adma_chan *ppc440spe_chan;
1950 struct ppc440spe_adma_desc_slot *sw_desc, *group_start;
1951 int slot_cnt, slots_per_op;
1952
1953 ppc440spe_chan = to_ppc440spe_adma_chan(chan);
1954
1955 dev_dbg(ppc440spe_chan->device->common.dev,
1956 "ppc440spe adma%d: %s\n", ppc440spe_chan->device->id,
1957 __func__);
1958
1959 spin_lock_bh(&ppc440spe_chan->lock);
1960 slot_cnt = slots_per_op = 1;
1961 sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt,
1962 slots_per_op);
1963 if (sw_desc) {
1964 group_start = sw_desc->group_head;
1965 ppc440spe_desc_init_interrupt(group_start, ppc440spe_chan);
1966 group_start->unmap_len = 0;
1967 sw_desc->async_tx.flags = flags;
1968 }
1969 spin_unlock_bh(&ppc440spe_chan->lock);
1970
1971 return sw_desc ? &sw_desc->async_tx : NULL;
1972 }
1973
1974
1975
1976
1977 static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_memcpy(
1978 struct dma_chan *chan, dma_addr_t dma_dest,
1979 dma_addr_t dma_src, size_t len, unsigned long flags)
1980 {
1981 struct ppc440spe_adma_chan *ppc440spe_chan;
1982 struct ppc440spe_adma_desc_slot *sw_desc, *group_start;
1983 int slot_cnt, slots_per_op;
1984
1985 ppc440spe_chan = to_ppc440spe_adma_chan(chan);
1986
1987 if (unlikely(!len))
1988 return NULL;
1989
1990 BUG_ON(len > PPC440SPE_ADMA_DMA_MAX_BYTE_COUNT);
1991
1992 spin_lock_bh(&ppc440spe_chan->lock);
1993
1994 dev_dbg(ppc440spe_chan->device->common.dev,
1995 "ppc440spe adma%d: %s len: %u int_en %d\n",
1996 ppc440spe_chan->device->id, __func__, len,
1997 flags & DMA_PREP_INTERRUPT ? 1 : 0);
1998 slot_cnt = slots_per_op = 1;
1999 sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt,
2000 slots_per_op);
2001 if (sw_desc) {
2002 group_start = sw_desc->group_head;
2003 ppc440spe_desc_init_memcpy(group_start, flags);
2004 ppc440spe_adma_set_dest(group_start, dma_dest, 0);
2005 ppc440spe_adma_memcpy_xor_set_src(group_start, dma_src, 0);
2006 ppc440spe_desc_set_byte_count(group_start, ppc440spe_chan, len);
2007 sw_desc->unmap_len = len;
2008 sw_desc->async_tx.flags = flags;
2009 }
2010 spin_unlock_bh(&ppc440spe_chan->lock);
2011
2012 return sw_desc ? &sw_desc->async_tx : NULL;
2013 }
2014
2015
2016
2017
2018 static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_xor(
2019 struct dma_chan *chan, dma_addr_t dma_dest,
2020 dma_addr_t *dma_src, u32 src_cnt, size_t len,
2021 unsigned long flags)
2022 {
2023 struct ppc440spe_adma_chan *ppc440spe_chan;
2024 struct ppc440spe_adma_desc_slot *sw_desc, *group_start;
2025 int slot_cnt, slots_per_op;
2026
2027 ppc440spe_chan = to_ppc440spe_adma_chan(chan);
2028
2029 ADMA_LL_DBG(prep_dma_xor_dbg(ppc440spe_chan->device->id,
2030 dma_dest, dma_src, src_cnt));
2031 if (unlikely(!len))
2032 return NULL;
2033 BUG_ON(len > PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT);
2034
2035 dev_dbg(ppc440spe_chan->device->common.dev,
2036 "ppc440spe adma%d: %s src_cnt: %d len: %u int_en: %d\n",
2037 ppc440spe_chan->device->id, __func__, src_cnt, len,
2038 flags & DMA_PREP_INTERRUPT ? 1 : 0);
2039
2040 spin_lock_bh(&ppc440spe_chan->lock);
2041 slot_cnt = ppc440spe_chan_xor_slot_count(len, src_cnt, &slots_per_op);
2042 sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt,
2043 slots_per_op);
2044 if (sw_desc) {
2045 group_start = sw_desc->group_head;
2046 ppc440spe_desc_init_xor(group_start, src_cnt, flags);
2047 ppc440spe_adma_set_dest(group_start, dma_dest, 0);
2048 while (src_cnt--)
2049 ppc440spe_adma_memcpy_xor_set_src(group_start,
2050 dma_src[src_cnt], src_cnt);
2051 ppc440spe_desc_set_byte_count(group_start, ppc440spe_chan, len);
2052 sw_desc->unmap_len = len;
2053 sw_desc->async_tx.flags = flags;
2054 }
2055 spin_unlock_bh(&ppc440spe_chan->lock);
2056
2057 return sw_desc ? &sw_desc->async_tx : NULL;
2058 }
2059
2060 static inline void
2061 ppc440spe_desc_set_xor_src_cnt(struct ppc440spe_adma_desc_slot *desc,
2062 int src_cnt);
2063 static void ppc440spe_init_rxor_cursor(struct ppc440spe_rxor *cursor);
2064
2065
2066
2067
2068 static void ppc440spe_adma_init_dma2rxor_slot(
2069 struct ppc440spe_adma_desc_slot *desc,
2070 dma_addr_t *src, int src_cnt)
2071 {
2072 int i;
2073
2074
2075 for (i = 0; i < src_cnt; i++) {
2076 ppc440spe_adma_dma2rxor_prep_src(desc, &desc->rxor_cursor, i,
2077 desc->src_cnt, (u32)src[i]);
2078 }
2079 }
2080
2081
2082
2083
2084
2085 static struct ppc440spe_adma_desc_slot *ppc440spe_dma01_prep_mult(
2086 struct ppc440spe_adma_chan *ppc440spe_chan,
2087 dma_addr_t *dst, int dst_cnt, dma_addr_t *src, int src_cnt,
2088 const unsigned char *scf, size_t len, unsigned long flags)
2089 {
2090 struct ppc440spe_adma_desc_slot *sw_desc = NULL;
2091 unsigned long op = 0;
2092 int slot_cnt;
2093
2094 set_bit(PPC440SPE_DESC_WXOR, &op);
2095 slot_cnt = 2;
2096
2097 spin_lock_bh(&ppc440spe_chan->lock);
2098
2099
2100 sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt, 1);
2101 if (sw_desc) {
2102 struct ppc440spe_adma_chan *chan;
2103 struct ppc440spe_adma_desc_slot *iter;
2104 struct dma_cdb *hw_desc;
2105
2106 chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
2107 set_bits(op, &sw_desc->flags);
2108 sw_desc->src_cnt = src_cnt;
2109 sw_desc->dst_cnt = dst_cnt;
2110
2111
2112
2113 iter = list_first_entry(&sw_desc->group_list,
2114 struct ppc440spe_adma_desc_slot,
2115 chain_node);
2116 memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
2117
2118 iter->hw_next = list_entry(iter->chain_node.next,
2119 struct ppc440spe_adma_desc_slot,
2120 chain_node);
2121 clear_bit(PPC440SPE_DESC_INT, &iter->flags);
2122 hw_desc = iter->hw_desc;
2123 hw_desc->opc = DMA_CDB_OPC_MULTICAST;
2124
2125 ppc440spe_desc_set_dest_addr(iter, chan,
2126 DMA_CUED_XOR_BASE, dst[0], 0);
2127 ppc440spe_desc_set_dest_addr(iter, chan, 0, dst[1], 1);
2128 ppc440spe_desc_set_src_addr(iter, chan, 0, DMA_CUED_XOR_HB,
2129 src[0]);
2130 ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, len);
2131 iter->unmap_len = len;
2132
2133
2134
2135
2136
2137 iter = list_first_entry(&iter->chain_node,
2138 struct ppc440spe_adma_desc_slot,
2139 chain_node);
2140 memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
2141 iter->hw_next = NULL;
2142 if (flags & DMA_PREP_INTERRUPT)
2143 set_bit(PPC440SPE_DESC_INT, &iter->flags);
2144 else
2145 clear_bit(PPC440SPE_DESC_INT, &iter->flags);
2146
2147 hw_desc = iter->hw_desc;
2148 hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
2149 ppc440spe_desc_set_src_addr(iter, chan, 0,
2150 DMA_CUED_XOR_HB, dst[1]);
2151 ppc440spe_desc_set_dest_addr(iter, chan,
2152 DMA_CUED_XOR_BASE, dst[0], 0);
2153
2154 ppc440spe_desc_set_src_mult(iter, chan, DMA_CUED_MULT1_OFF,
2155 DMA_CDB_SG_DST1, scf[0]);
2156 ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, len);
2157 iter->unmap_len = len;
2158 sw_desc->async_tx.flags = flags;
2159 }
2160
2161 spin_unlock_bh(&ppc440spe_chan->lock);
2162
2163 return sw_desc;
2164 }
2165
2166
2167
2168
2169
2170
2171 static struct ppc440spe_adma_desc_slot *ppc440spe_dma01_prep_sum_product(
2172 struct ppc440spe_adma_chan *ppc440spe_chan,
2173 dma_addr_t *dst, dma_addr_t *src, int src_cnt,
2174 const unsigned char *scf, size_t len, unsigned long flags)
2175 {
2176 struct ppc440spe_adma_desc_slot *sw_desc = NULL;
2177 unsigned long op = 0;
2178 int slot_cnt;
2179
2180 set_bit(PPC440SPE_DESC_WXOR, &op);
2181 slot_cnt = 3;
2182
2183 spin_lock_bh(&ppc440spe_chan->lock);
2184
2185
2186 sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt, 1);
2187 if (sw_desc) {
2188 struct ppc440spe_adma_chan *chan;
2189 struct ppc440spe_adma_desc_slot *iter;
2190 struct dma_cdb *hw_desc;
2191
2192 chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
2193 set_bits(op, &sw_desc->flags);
2194 sw_desc->src_cnt = src_cnt;
2195 sw_desc->dst_cnt = 1;
2196
2197 iter = list_first_entry(&sw_desc->group_list,
2198 struct ppc440spe_adma_desc_slot,
2199 chain_node);
2200 memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
2201 iter->hw_next = list_entry(iter->chain_node.next,
2202 struct ppc440spe_adma_desc_slot,
2203 chain_node);
2204 clear_bit(PPC440SPE_DESC_INT, &iter->flags);
2205 hw_desc = iter->hw_desc;
2206 hw_desc->opc = DMA_CDB_OPC_MULTICAST;
2207
2208 ppc440spe_desc_set_dest_addr(iter, chan, DMA_CUED_XOR_BASE,
2209 *dst, 0);
2210 ppc440spe_desc_set_dest_addr(iter, chan, 0,
2211 ppc440spe_chan->qdest, 1);
2212 ppc440spe_desc_set_src_addr(iter, chan, 0, DMA_CUED_XOR_HB,
2213 src[1]);
2214 ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, len);
2215 iter->unmap_len = len;
2216
2217
2218
2219 iter = list_first_entry(&iter->chain_node,
2220 struct ppc440spe_adma_desc_slot,
2221 chain_node);
2222 memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
2223
2224 iter->hw_next = list_entry(iter->chain_node.next,
2225 struct ppc440spe_adma_desc_slot,
2226 chain_node);
2227 if (flags & DMA_PREP_INTERRUPT)
2228 set_bit(PPC440SPE_DESC_INT, &iter->flags);
2229 else
2230 clear_bit(PPC440SPE_DESC_INT, &iter->flags);
2231
2232 hw_desc = iter->hw_desc;
2233 hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
2234 ppc440spe_desc_set_src_addr(iter, chan, 0, DMA_CUED_XOR_HB,
2235 ppc440spe_chan->qdest);
2236 ppc440spe_desc_set_dest_addr(iter, chan, DMA_CUED_XOR_BASE,
2237 *dst, 0);
2238 ppc440spe_desc_set_src_mult(iter, chan, DMA_CUED_MULT1_OFF,
2239 DMA_CDB_SG_DST1, scf[1]);
2240 ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, len);
2241 iter->unmap_len = len;
2242
2243
2244
2245
2246
2247 iter = list_first_entry(&iter->chain_node,
2248 struct ppc440spe_adma_desc_slot,
2249 chain_node);
2250 memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
2251 iter->hw_next = NULL;
2252 if (flags & DMA_PREP_INTERRUPT)
2253 set_bit(PPC440SPE_DESC_INT, &iter->flags);
2254 else
2255 clear_bit(PPC440SPE_DESC_INT, &iter->flags);
2256
2257 hw_desc = iter->hw_desc;
2258 hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
2259 ppc440spe_desc_set_src_addr(iter, chan, 0, DMA_CUED_XOR_HB,
2260 src[0]);
2261 ppc440spe_desc_set_dest_addr(iter, chan, DMA_CUED_XOR_BASE,
2262 *dst, 0);
2263 ppc440spe_desc_set_src_mult(iter, chan, DMA_CUED_MULT1_OFF,
2264 DMA_CDB_SG_DST1, scf[0]);
2265 ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, len);
2266 iter->unmap_len = len;
2267 sw_desc->async_tx.flags = flags;
2268 }
2269
2270 spin_unlock_bh(&ppc440spe_chan->lock);
2271
2272 return sw_desc;
2273 }
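/*
 * Illustrative sketch (not part of the driver): the net byte-wise GF(2^8)
 * arithmetic that the three descriptors built above implement in hardware,
 * written with the lib/raid6 multiplication table. The function name is an
 * assumption for the example only.
 */
#if 0
#include <linux/raid/pq.h>

/* d[i] = scf0 * src0[i]  ^  scf1 * src1[i]  over GF(2^8) */
static void sum_product_sketch(u8 *d, const u8 *src0, const u8 *src1,
			       u8 scf0, u8 scf1, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)
		d[i] = raid6_gfmul[scf0][src0[i]] ^
		       raid6_gfmul[scf1][src1[i]];
}
#endif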
2274
2275 static struct ppc440spe_adma_desc_slot *ppc440spe_dma01_prep_pq(
2276 struct ppc440spe_adma_chan *ppc440spe_chan,
2277 dma_addr_t *dst, int dst_cnt, dma_addr_t *src, int src_cnt,
2278 const unsigned char *scf, size_t len, unsigned long flags)
2279 {
2280 int slot_cnt;
2281 struct ppc440spe_adma_desc_slot *sw_desc = NULL, *iter;
2282 unsigned long op = 0;
2283 unsigned char mult = 1;
2284
2285 pr_debug("%s: dst_cnt %d, src_cnt %d, len %zu\n",
2286 __func__, dst_cnt, src_cnt, len);
2287
2288 /* Select WXOR or RXOR operations depending on the source
2289  * addresses of the operators and the number of destinations
2290  * (RXOR supports only Q-parity calculations) */
2291 set_bit(PPC440SPE_DESC_WXOR, &op);
2292 if (!test_and_set_bit(PPC440SPE_RXOR_RUN, &ppc440spe_rxor_state)) {
2293 /* no active RXOR; do RXOR if:
2294  * - there is more than one source,
2295  * - len is aligned on a 512-byte boundary,
2296  * - the source addresses fit into one of the
2297  *   four possible regions
2298  */
2299 if (src_cnt > 1 &&
2300 !(len & MQ0_CF2H_RXOR_BS_MASK) &&
2301 (src[0] + len) == src[1]) {
2302 /* may do RXOR R1 R2 */
2303 set_bit(PPC440SPE_DESC_RXOR, &op);
2304 if (src_cnt != 2) {
2305 /* may try to enhance the region of RXOR */
2306 if ((src[1] + len) == src[2]) {
2307 /* do RXOR R1 R2 R3 */
2308 set_bit(PPC440SPE_DESC_RXOR123,
2309 &op);
2310 } else if ((src[1] + len * 2) == src[2]) {
2311 /* do RXOR R1 R2 R4 */
2312 set_bit(PPC440SPE_DESC_RXOR124, &op);
2313 } else if ((src[1] + len * 3) == src[2]) {
2314 /* do RXOR R1 R2 R5 */
2315 set_bit(PPC440SPE_DESC_RXOR125,
2316 &op);
2317 } else {
2318 /* do RXOR R1 R2 */
2319 set_bit(PPC440SPE_DESC_RXOR12,
2320 &op);
2321 }
2322 } else {
2323 /* do RXOR R1 R2 */
2324 set_bit(PPC440SPE_DESC_RXOR12, &op);
2325 }
2326 }
2327
2328 if (!test_bit(PPC440SPE_DESC_RXOR, &op)) {
2329 /* can not do this operation with RXOR */
2330 clear_bit(PPC440SPE_RXOR_RUN,
2331 &ppc440spe_rxor_state);
2332 } else {
2333 /* can do; set block size right now */
2334 ppc440spe_desc_set_rxor_block_size(len);
2335 }
2336 }
2337
2338 /* Number of necessary slots depends on operation type selected */
2339 if (!test_bit(PPC440SPE_DESC_RXOR, &op)) {
2340 /* This is a WXOR-only chain: need descriptors for each
2341  * source to GF-XOR them in with WXOR, and need descriptors
2342  * for each destination to zero them with WXOR
2343  */
2344 slot_cnt = src_cnt;
2345
2346 if (flags & DMA_PREP_ZERO_P) {
2347 slot_cnt++;
2348 set_bit(PPC440SPE_ZERO_P, &op);
2349 }
2350 if (flags & DMA_PREP_ZERO_Q) {
2351 slot_cnt++;
2352 set_bit(PPC440SPE_ZERO_Q, &op);
2353 }
2354 } else {
2355 /* Need one or two descriptors for the RXOR operation,
2356  * plus (src_cnt - (2 or 3)) descriptors for WXOR of the
2357  * remaining sources (if any)
2358  */
2359 slot_cnt = dst_cnt;
2360
2361 if (flags & DMA_PREP_ZERO_P)
2362 set_bit(PPC440SPE_ZERO_P, &op);
2363 if (flags & DMA_PREP_ZERO_Q)
2364 set_bit(PPC440SPE_ZERO_Q, &op);
2365
2366 if (test_bit(PPC440SPE_DESC_RXOR12, &op))
2367 slot_cnt += src_cnt - 2;
2368 else
2369 slot_cnt += src_cnt - 3;
2370
2371 /* Thus we have either an RXOR-only chain or a
2372  * mixed RXOR/WXOR chain
2373  */
2374 if (slot_cnt == dst_cnt)
2375 /* RXOR-only chain */
2376 clear_bit(PPC440SPE_DESC_WXOR, &op);
2377 }
2378
2379 spin_lock_bh(&ppc440spe_chan->lock);
2380 /* for both RXOR and WXOR each descriptor occupies one slot */
2381 sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt, 1);
2382 if (sw_desc) {
2383 ppc440spe_desc_init_dma01pq(sw_desc, dst_cnt, src_cnt,
2384 flags, op);
2385
2386
2387 pr_debug("%s: set dst descriptor 0, 1: 0x%016llx, 0x%016llx\n",
2388 __func__, dst[0], dst[1]);
2389 ppc440spe_adma_pq_set_dest(sw_desc, dst, flags);
2390 while (src_cnt--) {
2391 ppc440spe_adma_pq_set_src(sw_desc, src[src_cnt],
2392 src_cnt);
2393
2394 /* NOTE: "Multi = 0 is equivalent to = 1" as stated in the
2395  * 440SP(E) RAID-6 addendum doesn't work for RXOR with
2396  * DMA0/1! Instead, multi=0 leads to zeroing the source
2397  * data after RXOR. So, for the P case set up mult=1
2398  * explicitly.
2399  */
2400 if (!(flags & DMA_PREP_PQ_DISABLE_Q))
2401 mult = scf[src_cnt];
2402 ppc440spe_adma_pq_set_src_mult(sw_desc,
2403 mult, src_cnt, dst_cnt - 1);
2404 }
2405
2406
2407 sw_desc->async_tx.flags = flags;
2408 list_for_each_entry(iter, &sw_desc->group_list,
2409 chain_node) {
2410 ppc440spe_desc_set_byte_count(iter,
2411 ppc440spe_chan, len);
2412 iter->unmap_len = len;
2413 }
2414 }
2415 spin_unlock_bh(&ppc440spe_chan->lock);
2416
2417 return sw_desc;
2418 }
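/*
 * Illustrative sketch (not part of the driver): the RXOR region
 * classification performed above, pulled out as a standalone helper.
 * The enum and function names are assumptions for the example only.
 */
#if 0
enum rxor_variant { RXOR_NONE, RXOR_12, RXOR_123, RXOR_124, RXOR_125 };

static enum rxor_variant classify_rxor(dma_addr_t *src, int src_cnt,
				       size_t len)
{
	/* RXOR needs >1 source, 512-byte aligned len, adjacent src[0,1] */
	if (src_cnt < 2 || (len & MQ0_CF2H_RXOR_BS_MASK) ||
	    src[0] + len != src[1])
		return RXOR_NONE;		/* fall back to WXOR-only */
	if (src_cnt == 2)
		return RXOR_12;			/* R1 R2 */
	if (src[1] + len == src[2])
		return RXOR_123;		/* R1 R2 R3 */
	if (src[1] + len * 2 == src[2])
		return RXOR_124;		/* R1 R2 R4 */
	if (src[1] + len * 3 == src[2])
		return RXOR_125;		/* R1 R2 R5 */
	return RXOR_12;				/* only first two regions fit */
}
#endif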
2419
2420 static struct ppc440spe_adma_desc_slot *ppc440spe_dma2_prep_pq(
2421 struct ppc440spe_adma_chan *ppc440spe_chan,
2422 dma_addr_t *dst, int dst_cnt, dma_addr_t *src, int src_cnt,
2423 const unsigned char *scf, size_t len, unsigned long flags)
2424 {
2425 int slot_cnt, descs_per_op;
2426 struct ppc440spe_adma_desc_slot *sw_desc = NULL, *iter;
2427 unsigned long op = 0;
2428 unsigned char mult = 1;
2429
2430 BUG_ON(!dst_cnt);
2431
2432
2433
2434 spin_lock_bh(&ppc440spe_chan->lock);
2435 descs_per_op = ppc440spe_dma2_pq_slot_count(src, src_cnt, len);
2436 if (descs_per_op < 0) {
2437 spin_unlock_bh(&ppc440spe_chan->lock);
2438 return NULL;
2439 }
2440
2441 /* depending on the number of sources we have 1 or 2 RXOR chains */
2442 slot_cnt = descs_per_op * dst_cnt;
2443
2444 sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt, 1);
2445 if (sw_desc) {
2446 op = slot_cnt;
2447 sw_desc->async_tx.flags = flags;
2448 list_for_each_entry(iter, &sw_desc->group_list, chain_node) {
2449 ppc440spe_desc_init_dma2pq(iter, dst_cnt, src_cnt,
2450 --op ? 0 : flags);
2451 ppc440spe_desc_set_byte_count(iter, ppc440spe_chan,
2452 len);
2453 iter->unmap_len = len;
2454
2455 ppc440spe_init_rxor_cursor(&(iter->rxor_cursor));
2456 iter->rxor_cursor.len = len;
2457 iter->descs_per_op = descs_per_op;
2458 }
2459 op = 0;
2460 list_for_each_entry(iter, &sw_desc->group_list, chain_node) {
2461 op++;
2462 if (op % descs_per_op == 0)
2463 ppc440spe_adma_init_dma2rxor_slot(iter, src,
2464 src_cnt);
2465 if (likely(!list_is_last(&iter->chain_node,
2466 &sw_desc->group_list))) {
2467
2468 iter->hw_next =
2469 list_entry(iter->chain_node.next,
2470 struct ppc440spe_adma_desc_slot,
2471 chain_node);
2472 ppc440spe_xor_set_link(iter, iter->hw_next);
2473 } else {
2474
2475 iter->hw_next = NULL;
2476 }
2477 }
2478
2479
2480 sw_desc->dst_cnt = dst_cnt;
2481 if (flags & DMA_PREP_ZERO_P)
2482 set_bit(PPC440SPE_ZERO_P, &sw_desc->flags);
2483 if (flags & DMA_PREP_ZERO_Q)
2484 set_bit(PPC440SPE_ZERO_Q, &sw_desc->flags);
2485
2486
2487 ppc440spe_adma_pq_set_dest(sw_desc, dst, flags);
2488
2489 while (src_cnt--) {
2490 /* handle descriptors (if dst_cnt == 2) inside
2491  * the ppc440spe_adma_pq_set_srcxxx() functions
2492  */
2493 ppc440spe_adma_pq_set_src(sw_desc, src[src_cnt],
2494 src_cnt);
2495 if (!(flags & DMA_PREP_PQ_DISABLE_Q))
2496 mult = scf[src_cnt];
2497 ppc440spe_adma_pq_set_src_mult(sw_desc,
2498 mult, src_cnt, dst_cnt - 1);
2499 }
2500 }
2501 spin_unlock_bh(&ppc440spe_chan->lock);
2502 ppc440spe_desc_set_rxor_block_size(len);
2503 return sw_desc;
2504 }
2505
2506 /**
2507  * ppc440spe_adma_prep_dma_pq - prepare CDB (group) for a GF-XOR operation
2508  */
2509 static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_pq(
2510 struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
2511 unsigned int src_cnt, const unsigned char *scf,
2512 size_t len, unsigned long flags)
2513 {
2514 struct ppc440spe_adma_chan *ppc440spe_chan;
2515 struct ppc440spe_adma_desc_slot *sw_desc = NULL;
2516 int dst_cnt = 0;
2517
2518 ppc440spe_chan = to_ppc440spe_adma_chan(chan);
2519
2520 ADMA_LL_DBG(prep_dma_pq_dbg(ppc440spe_chan->device->id,
2521 dst, src, src_cnt));
2522 BUG_ON(!len);
2523 BUG_ON(len > PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT);
2524 BUG_ON(!src_cnt);
2525
2526 if (src_cnt == 1 && dst[1] == src[0]) {
2527 dma_addr_t dest[2];
2528
2529 /* dst[1] is the real destination (Q) */
2530 dest[0] = dst[1];
2531 /* this is the page to multicast the source data to */
2532 dest[1] = ppc440spe_chan->qdest;
2533 sw_desc = ppc440spe_dma01_prep_mult(ppc440spe_chan,
2534 dest, 2, src, src_cnt, scf, len, flags);
2535 return sw_desc ? &sw_desc->async_tx : NULL;
2536 }
2537
2538 if (src_cnt == 2 && dst[1] == src[1]) {
2539 sw_desc = ppc440spe_dma01_prep_sum_product(ppc440spe_chan,
2540 &dst[1], src, 2, scf, len, flags);
2541 return sw_desc ? &sw_desc->async_tx : NULL;
2542 }
2543
2544 if (!(flags & DMA_PREP_PQ_DISABLE_P)) {
2545 BUG_ON(!dst[0]);
2546 dst_cnt++;
2547 flags |= DMA_PREP_ZERO_P;
2548 }
2549
2550 if (!(flags & DMA_PREP_PQ_DISABLE_Q)) {
2551 BUG_ON(!dst[1]);
2552 dst_cnt++;
2553 flags |= DMA_PREP_ZERO_Q;
2554 }
2555
2556 BUG_ON(!dst_cnt);
2557
2558 dev_dbg(ppc440spe_chan->device->common.dev,
2559 "ppc440spe adma%d: %s src_cnt: %d len: %u int_en: %d\n",
2560 ppc440spe_chan->device->id, __func__, src_cnt, len,
2561 flags & DMA_PREP_INTERRUPT ? 1 : 0);
2562
2563 switch (ppc440spe_chan->device->id) {
2564 case PPC440SPE_DMA0_ID:
2565 case PPC440SPE_DMA1_ID:
2566 sw_desc = ppc440spe_dma01_prep_pq(ppc440spe_chan,
2567 dst, dst_cnt, src, src_cnt, scf,
2568 len, flags);
2569 break;
2570
2571 case PPC440SPE_XOR_ID:
2572 sw_desc = ppc440spe_dma2_prep_pq(ppc440spe_chan,
2573 dst, dst_cnt, src, src_cnt, scf,
2574 len, flags);
2575 break;
2576 }
2577
2578 return sw_desc ? &sw_desc->async_tx : NULL;
2579 }
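/*
 * Illustrative sketch (not part of the driver): how a dmaengine client
 * such as the async_tx RAID code drives the prep routine above. The
 * function name is an assumption; channel selection and error handling
 * are elided.
 */
#if 0
static dma_cookie_t submit_pq_sketch(struct dma_chan *chan,
				     dma_addr_t *pq,   /* pq[0]=P, pq[1]=Q */
				     dma_addr_t *srcs, unsigned int src_cnt,
				     const unsigned char *coefs, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	tx = chan->device->device_prep_dma_pq(chan, pq, srcs, src_cnt,
					      coefs, len, DMA_PREP_INTERRUPT);
	if (!tx)
		return -ENOMEM;		/* descriptor slots exhausted */

	cookie = tx->tx_submit(tx);	/* ppc440spe_adma_tx_submit() */
	dma_async_issue_pending(chan);	/* ppc440spe_adma_issue_pending() */
	return cookie;
}
#endif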
2580
2581 /**
2582  * ppc440spe_adma_prep_dma_pqzero_sum - prepare CDB group for
2583  * a PQ_ZERO_SUM operation
2584  */
2585 static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_pqzero_sum(
2586 struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
2587 unsigned int src_cnt, const unsigned char *scf, size_t len,
2588 enum sum_check_flags *pqres, unsigned long flags)
2589 {
2590 struct ppc440spe_adma_chan *ppc440spe_chan;
2591 struct ppc440spe_adma_desc_slot *sw_desc, *iter;
2592 dma_addr_t pdest, qdest;
2593 int slot_cnt, slots_per_op, idst, dst_cnt;
2594
2595 ppc440spe_chan = to_ppc440spe_adma_chan(chan);
2596
2597 if (flags & DMA_PREP_PQ_DISABLE_P)
2598 pdest = 0;
2599 else
2600 pdest = pq[0];
2601
2602 if (flags & DMA_PREP_PQ_DISABLE_Q)
2603 qdest = 0;
2604 else
2605 qdest = pq[1];
2606
2607 ADMA_LL_DBG(prep_dma_pqzero_sum_dbg(ppc440spe_chan->device->id,
2608 src, src_cnt, scf));
2609
2610 /* Always use WXOR for P/Q calculations (two destinations).
2611  * Need one or two extra slots to verify that the results are zero.
2612  */
2613 idst = dst_cnt = (pdest && qdest) ? 2 : 1;
2614
2615 /* Per destination we need one slot to clone P/Q to the spare
2616  * page and one slot for the DCHECK operation itself.
2617  */
2618 slot_cnt = src_cnt + dst_cnt * 2;
2619 slots_per_op = 1;
2620
2621 spin_lock_bh(&ppc440spe_chan->lock);
2622 sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt,
2623 slots_per_op);
2624 if (sw_desc) {
2625 ppc440spe_desc_init_dma01pqzero_sum(sw_desc, dst_cnt, src_cnt);
2626
2627 /* setup the byte count for each slot just allocated */
2628 sw_desc->async_tx.flags = flags;
2629 list_for_each_entry(iter, &sw_desc->group_list, chain_node) {
2630 ppc440spe_desc_set_byte_count(iter, ppc440spe_chan,
2631 len);
2632 iter->unmap_len = len;
2633 }
2634
2635 if (pdest) {
2636 struct dma_cdb *hw_desc;
2637 struct ppc440spe_adma_chan *chan;
2638
2639 iter = sw_desc->group_head;
2640 chan = to_ppc440spe_adma_chan(iter->async_tx.chan);
2641 memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
2642 iter->hw_next = list_entry(iter->chain_node.next,
2643 struct ppc440spe_adma_desc_slot,
2644 chain_node);
2645 hw_desc = iter->hw_desc;
2646 hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
2647 iter->src_cnt = 0;
2648 iter->dst_cnt = 0;
2649 ppc440spe_desc_set_dest_addr(iter, chan, 0,
2650 ppc440spe_chan->pdest, 0);
2651 ppc440spe_desc_set_src_addr(iter, chan, 0, 0, pdest);
2652 ppc440spe_desc_set_byte_count(iter, ppc440spe_chan,
2653 len);
2654 iter->unmap_len = 0;
2655 /* override pdest with the spare page; preserve the original P */
2656 pdest = ppc440spe_chan->pdest;
2657 }
2658 if (qdest) {
2659 struct dma_cdb *hw_desc;
2660 struct ppc440spe_adma_chan *chan;
2661
2662 iter = list_first_entry(&sw_desc->group_list,
2663 struct ppc440spe_adma_desc_slot,
2664 chain_node);
2665 chan = to_ppc440spe_adma_chan(iter->async_tx.chan);
2666
2667 if (pdest) {
2668 iter = list_entry(iter->chain_node.next,
2669 struct ppc440spe_adma_desc_slot,
2670 chain_node);
2671 }
2672
2673 memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
2674 iter->hw_next = list_entry(iter->chain_node.next,
2675 struct ppc440spe_adma_desc_slot,
2676 chain_node);
2677 hw_desc = iter->hw_desc;
2678 hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
2679 iter->src_cnt = 0;
2680 iter->dst_cnt = 0;
2681 ppc440spe_desc_set_dest_addr(iter, chan, 0,
2682 ppc440spe_chan->qdest, 0);
2683 ppc440spe_desc_set_src_addr(iter, chan, 0, 0, qdest);
2684 ppc440spe_desc_set_byte_count(iter, ppc440spe_chan,
2685 len);
2686 iter->unmap_len = 0;
2687 /* override qdest with the spare page; preserve the original Q */
2688 qdest = ppc440spe_chan->qdest;
2689 }
2690
2691 /* Setup destinations for P/Q ops */
2692 ppc440spe_adma_pqzero_sum_set_dest(sw_desc, pdest, qdest);
2693
2694 /* Setup zero QWORDs into DCHECK CDBs */
2695 idst = dst_cnt;
2696 list_for_each_entry_reverse(iter, &sw_desc->group_list,
2697 chain_node) {
2698 /*
2699  * The last CDB corresponds to the Q-parity check,
2700  * the one before the last CDB corresponds to the
2701  * P-parity check
2702  */
2703 if (idst == DMA_DEST_MAX_NUM) {
2704 if (idst == dst_cnt) {
2705 set_bit(PPC440SPE_DESC_QCHECK,
2706 &iter->flags);
2707 } else {
2708 set_bit(PPC440SPE_DESC_PCHECK,
2709 &iter->flags);
2710 }
2711 } else {
2712 if (qdest) {
2713 set_bit(PPC440SPE_DESC_QCHECK,
2714 &iter->flags);
2715 } else {
2716 set_bit(PPC440SPE_DESC_PCHECK,
2717 &iter->flags);
2718 }
2719 }
2720 iter->xor_check_result = pqres;
2721
2722 /*
2723  * Set the result to zero; if the check fails it
2724  * will be updated by the hardware
2725  */
2726 *iter->xor_check_result = 0;
2727 ppc440spe_desc_set_dcheck(iter, ppc440spe_chan,
2728 ppc440spe_qword);
2729
2730 if (!(--dst_cnt))
2731 break;
2732 }
2733
2734 /* Setup sources and mults for P/Q ops */
2735 list_for_each_entry_continue_reverse(iter, &sw_desc->group_list,
2736 chain_node) {
2737 struct ppc440spe_adma_chan *chan;
2738 u32 mult_dst;
2739
2740 chan = to_ppc440spe_adma_chan(iter->async_tx.chan);
2741 ppc440spe_desc_set_src_addr(iter, chan, 0,
2742 DMA_CUED_XOR_HB,
2743 src[src_cnt - 1]);
2744 if (qdest) {
2745 mult_dst = (dst_cnt - 1) ? DMA_CDB_SG_DST2 :
2746 DMA_CDB_SG_DST1;
2747 ppc440spe_desc_set_src_mult(iter, chan,
2748 DMA_CUED_MULT1_OFF,
2749 mult_dst,
2750 scf[src_cnt - 1]);
2751 }
2752 if (!(--src_cnt))
2753 break;
2754 }
2755 }
2756 spin_unlock_bh(&ppc440spe_chan->lock);
2757 return sw_desc ? &sw_desc->async_tx : NULL;
2758 }
2759
2760 /**
2761  * ppc440spe_adma_prep_dma_xor_zero_sum - prepare CDB group for
2762  * an XOR ZERO_SUM operation
2763  */
2764 static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_xor_zero_sum(
2765 struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt,
2766 size_t len, enum sum_check_flags *result, unsigned long flags)
2767 {
2768 struct dma_async_tx_descriptor *tx;
2769 dma_addr_t pq[2];
2770
2771 /* validate P, disable Q */
2772 pq[0] = src[0];
2773 pq[1] = 0;
2774 flags |= DMA_PREP_PQ_DISABLE_Q;
2775
2776 tx = ppc440spe_adma_prep_dma_pqzero_sum(chan, pq, &src[1],
2777 src_cnt - 1, NULL, len,
2778 result, flags);
2779 return tx;
2780 }
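/*
 * Illustrative sketch (not part of the driver): interpreting the
 * sum_check_flags word that the validation preps above update on
 * completion. SUM_CHECK_P_RESULT/SUM_CHECK_Q_RESULT come from the
 * async_tx API; the function name is an assumption.
 */
#if 0
static void report_pq_check_sketch(enum sum_check_flags pqres)
{
	if (pqres & SUM_CHECK_P_RESULT)
		pr_err("P parity does not match the data\n");
	if (pqres & SUM_CHECK_Q_RESULT)
		pr_err("Q parity does not match the data\n");
	if (!pqres)
		pr_info("P/Q parity verified\n");
}
#endif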
2781
2782 /**
2783  * ppc440spe_adma_set_dest - set destination address into descriptor
2784  */
2785 static void ppc440spe_adma_set_dest(struct ppc440spe_adma_desc_slot *sw_desc,
2786 dma_addr_t addr, int index)
2787 {
2788 struct ppc440spe_adma_chan *chan;
2789
2790 BUG_ON(index >= sw_desc->dst_cnt);
2791
2792 chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
2793
2794 switch (chan->device->id) {
2795 case PPC440SPE_DMA0_ID:
2796 case PPC440SPE_DMA1_ID:
2797 /* to do: support transfer lengths greater than
2798  * PPC440SPE_ADMA_DMA/XOR_MAX_BYTE_COUNT
2799  */
2800 ppc440spe_desc_set_dest_addr(sw_desc->group_head,
2801 chan, 0, addr, index);
2802 break;
2803 case PPC440SPE_XOR_ID:
2804 sw_desc = ppc440spe_get_group_entry(sw_desc, index);
2805 ppc440spe_desc_set_dest_addr(sw_desc,
2806 chan, 0, addr, index);
2807 break;
2808 }
2809 }
2810
2811 static void ppc440spe_adma_pq_zero_op(struct ppc440spe_adma_desc_slot *iter,
2812 struct ppc440spe_adma_chan *chan, dma_addr_t addr)
2813 {
2814 /* To clear the destination, update the descriptor
2815  * (P or Q depending on the index) as follows:
2816  * addr is the destination (0 corresponds to SG2):
2817  */
2818 ppc440spe_desc_set_dest_addr(iter, chan, DMA_CUED_XOR_BASE, addr, 0);
2819
2820 /* ... and addr is the source: */
2821 ppc440spe_desc_set_src_addr(iter, chan, 0, DMA_CUED_XOR_HB, addr);
2822
2823 /* addr is always SG2, so the mult is always DST1 */
2824 ppc440spe_desc_set_src_mult(iter, chan, DMA_CUED_MULT1_OFF,
2825 DMA_CDB_SG_DST1, 1);
2826 }
2827
2828 /**
2829  * ppc440spe_adma_pq_set_dest - set destination address into descriptor
2830  * for the PQXOR operation
2831  */
2832 static void ppc440spe_adma_pq_set_dest(struct ppc440spe_adma_desc_slot *sw_desc,
2833 dma_addr_t *addrs, unsigned long flags)
2834 {
2835 struct ppc440spe_adma_desc_slot *iter;
2836 struct ppc440spe_adma_chan *chan;
2837 dma_addr_t paddr, qaddr;
2838 dma_addr_t addr = 0, ppath, qpath;
2839 int index = 0, i;
2840
2841 chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
2842
2843 if (flags & DMA_PREP_PQ_DISABLE_P)
2844 paddr = 0;
2845 else
2846 paddr = addrs[0];
2847
2848 if (flags & DMA_PREP_PQ_DISABLE_Q)
2849 qaddr = 0;
2850 else
2851 qaddr = addrs[1];
2852
2853 if (!paddr || !qaddr)
2854 addr = paddr ? paddr : qaddr;
2855
2856 switch (chan->device->id) {
2857 case PPC440SPE_DMA0_ID:
2858 case PPC440SPE_DMA1_ID:
2859 /* walk through the WXOR source list and set P/Q-destinations
2860  * for each slot:
2861  */
2862 if (!test_bit(PPC440SPE_DESC_RXOR, &sw_desc->flags)) {
2863 /* This is a WXOR-only chain; it may have 1 or 2 zeroing descriptors */
2864 if (test_bit(PPC440SPE_ZERO_P, &sw_desc->flags))
2865 index++;
2866 if (test_bit(PPC440SPE_ZERO_Q, &sw_desc->flags))
2867 index++;
2868
2869 iter = ppc440spe_get_group_entry(sw_desc, index);
2870 if (addr) {
2871 /* one destination */
2872 list_for_each_entry_from(iter,
2873 &sw_desc->group_list, chain_node)
2874 ppc440spe_desc_set_dest_addr(iter, chan,
2875 DMA_CUED_XOR_BASE, addr, 0);
2876 } else {
2877 /* two destinations */
2878 list_for_each_entry_from(iter,
2879 &sw_desc->group_list, chain_node) {
2880 ppc440spe_desc_set_dest_addr(iter, chan,
2881 DMA_CUED_XOR_BASE, paddr, 0);
2882 ppc440spe_desc_set_dest_addr(iter, chan,
2883 DMA_CUED_XOR_BASE, qaddr, 1);
2884 }
2885 }
2886
2887 if (index) {
2888 /* To clear the destinations, update the descriptor
2889  * (1st or 2nd in the group, depending on index):
2890  */
2891 index = 0;
2892 if (test_bit(PPC440SPE_ZERO_P,
2893 &sw_desc->flags)) {
2894 iter = ppc440spe_get_group_entry(
2895 sw_desc, index++);
2896 ppc440spe_adma_pq_zero_op(iter, chan,
2897 paddr);
2898 }
2899
2900 if (test_bit(PPC440SPE_ZERO_Q,
2901 &sw_desc->flags)) {
2902 iter = ppc440spe_get_group_entry(
2903 sw_desc, index++);
2904 ppc440spe_adma_pq_zero_op(iter, chan,
2905 qaddr);
2906 }
2907
2908 return;
2909 }
2910 } else {
2911 /* This is an RXOR-only or mixed RXOR/WXOR chain */
2912
2913 /* If we want to include a destination in the calculations,
2914  * make its address cued with mult=1 (plain XOR)
2915  */
2916 ppath = test_bit(PPC440SPE_ZERO_P, &sw_desc->flags) ?
2917 DMA_CUED_XOR_HB :
2918 DMA_CUED_XOR_BASE |
2919 (1 << DMA_CUED_MULT1_OFF);
2920 qpath = test_bit(PPC440SPE_ZERO_Q, &sw_desc->flags) ?
2921 DMA_CUED_XOR_HB :
2922 DMA_CUED_XOR_BASE |
2923 (1 << DMA_CUED_MULT1_OFF);
2924
2925 /* Setup destination(s) in the RXOR slot(s) */
2926 iter = ppc440spe_get_group_entry(sw_desc, index++);
2927 ppc440spe_desc_set_dest_addr(iter, chan,
2928 paddr ? ppath : qpath,
2929 paddr ? paddr : qaddr, 0);
2930 if (!addr) {
2931 /* two destinations */
2932 iter = ppc440spe_get_group_entry(sw_desc,
2933 index++);
2934 ppc440spe_desc_set_dest_addr(iter, chan,
2935 qpath, qaddr, 0);
2936 }
2937
2938 if (test_bit(PPC440SPE_DESC_WXOR, &sw_desc->flags)) {
2939 /* Setup destination(s) in the remaining
2940  * WXOR slots
2941  */
2942 iter = ppc440spe_get_group_entry(sw_desc,
2943 index);
2944 if (addr) {
2945 /* one destination */
2946 list_for_each_entry_from(iter,
2947 &sw_desc->group_list,
2948 chain_node)
2949 ppc440spe_desc_set_dest_addr(
2950 iter, chan,
2951 DMA_CUED_XOR_BASE,
2952 addr, 0);
2953
2954 } else {
2955 /* two destinations */
2956 list_for_each_entry_from(iter,
2957 &sw_desc->group_list,
2958 chain_node) {
2959 ppc440spe_desc_set_dest_addr(
2960 iter, chan,
2961 DMA_CUED_XOR_BASE,
2962 paddr, 0);
2963 ppc440spe_desc_set_dest_addr(
2964 iter, chan,
2965 DMA_CUED_XOR_BASE,
2966 qaddr, 1);
2967 }
2968 }
2969 }
2970
2971 }
2972 break;
2973
2974 case PPC440SPE_XOR_ID:
2975 /* DMA2 descriptors have only one destination, so there are
2976  * two chains: one per destination.
2977  * If we want to include a destination in the calculations,
2978  * make its address cued with mult=1 (plain XOR).
2979  */
2980 ppath = test_bit(PPC440SPE_ZERO_P, &sw_desc->flags) ?
2981 DMA_CUED_XOR_HB :
2982 DMA_CUED_XOR_BASE |
2983 (1 << DMA_CUED_MULT1_OFF);
2984
2985 qpath = test_bit(PPC440SPE_ZERO_Q, &sw_desc->flags) ?
2986 DMA_CUED_XOR_HB :
2987 DMA_CUED_XOR_BASE |
2988 (1 << DMA_CUED_MULT1_OFF);
2989
2990 iter = ppc440spe_get_group_entry(sw_desc, 0);
2991 for (i = 0; i < sw_desc->descs_per_op; i++) {
2992 ppc440spe_desc_set_dest_addr(iter, chan,
2993 paddr ? ppath : qpath,
2994 paddr ? paddr : qaddr, 0);
2995 iter = list_entry(iter->chain_node.next,
2996 struct ppc440spe_adma_desc_slot,
2997 chain_node);
2998 }
2999
3000 if (!addr) {
3001 /* two destinations */
3002 iter = ppc440spe_get_group_entry(sw_desc,
3003 sw_desc->descs_per_op);
3004 for (i = 0; i < sw_desc->descs_per_op; i++) {
3005 ppc440spe_desc_set_dest_addr(iter,
3006 chan, qpath, qaddr, 0);
3007 iter = list_entry(iter->chain_node.next,
3008 struct ppc440spe_adma_desc_slot,
3009 chain_node);
3010 }
3011 }
3012
3013 break;
3014 }
3015 }
3016
3017 /**
3018  * ppc440spe_adma_pqzero_sum_set_dest - set destination address into
3019  * descriptor for the PQ_ZERO_SUM operation
3020  */
3021 static void ppc440spe_adma_pqzero_sum_set_dest(
3022 struct ppc440spe_adma_desc_slot *sw_desc,
3023 dma_addr_t paddr, dma_addr_t qaddr)
3024 {
3025 struct ppc440spe_adma_desc_slot *iter, *end;
3026 struct ppc440spe_adma_chan *chan;
3027 dma_addr_t addr = 0;
3028 int idx;
3029
3030 chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
3031
3032 /* walk through the WXOR source list and set P/Q-destinations
3033  * for each slot
3034  */
3035 idx = (paddr && qaddr) ? 2 : 1;
3036 /* set end */
3037 list_for_each_entry_reverse(end, &sw_desc->group_list,
3038 chain_node) {
3039 if (!(--idx))
3040 break;
3041 }
3042 /* set start */
3043 idx = (paddr && qaddr) ? 2 : 1;
3044 iter = ppc440spe_get_group_entry(sw_desc, idx);
3045
3046 if (paddr && qaddr) {
3047 /* two destinations */
3048 list_for_each_entry_from(iter, &sw_desc->group_list,
3049 chain_node) {
3050 if (unlikely(iter == end))
3051 break;
3052 ppc440spe_desc_set_dest_addr(iter, chan,
3053 DMA_CUED_XOR_BASE, paddr, 0);
3054 ppc440spe_desc_set_dest_addr(iter, chan,
3055 DMA_CUED_XOR_BASE, qaddr, 1);
3056 }
3057 } else {
3058 /* one destination */
3059 addr = paddr ? paddr : qaddr;
3060 list_for_each_entry_from(iter, &sw_desc->group_list,
3061 chain_node) {
3062 if (unlikely(iter == end))
3063 break;
3064 ppc440spe_desc_set_dest_addr(iter, chan,
3065 DMA_CUED_XOR_BASE, addr, 0);
3066 }
3067 }
3068
3069 /* The remaining descriptors are DATACHECK; they need no
3070  * destination. Their destination fields are actually used
3071  * as sources for the check operation, so set addr as source.
3072  */
3073 ppc440spe_desc_set_src_addr(end, chan, 0, 0, addr ? addr : paddr);
3074
3075 if (!addr) {
3076 end = list_entry(end->chain_node.next,
3077 struct ppc440spe_adma_desc_slot, chain_node);
3078 ppc440spe_desc_set_src_addr(end, chan, 0, 0, qaddr);
3079 }
3080 }
3081
3082 /*
3083  * ppc440spe_desc_set_xor_src_cnt - set the source count into a DMA2 CDB
3084  */
3085 static inline void ppc440spe_desc_set_xor_src_cnt(
3086 struct ppc440spe_adma_desc_slot *desc,
3087 int src_cnt)
3088 {
3089 struct xor_cb *hw_desc = desc->hw_desc;
3090
3091 hw_desc->cbc &= ~XOR_CDCR_OAC_MSK;
3092 hw_desc->cbc |= src_cnt;
3093 }
3094
3095 /**
3096  * ppc440spe_adma_pq_set_src - set source address into descriptor
3097  */
3098 static void ppc440spe_adma_pq_set_src(struct ppc440spe_adma_desc_slot *sw_desc,
3099 dma_addr_t addr, int index)
3100 {
3101 struct ppc440spe_adma_chan *chan;
3102 dma_addr_t haddr = 0;
3103 struct ppc440spe_adma_desc_slot *iter = NULL;
3104
3105 chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
3106
3107 switch (chan->device->id) {
3108 case PPC440SPE_DMA0_ID:
3109 case PPC440SPE_DMA1_ID:
3110 /* DMA0/1 may do: WXOR, RXOR, or an RXOR
3111  * followed by a WXOR chain */
3112 if (test_bit(PPC440SPE_DESC_RXOR, &sw_desc->flags)) {
3113 /* RXOR-only or RXOR/WXOR operation */
3114 int iskip = test_bit(PPC440SPE_DESC_RXOR12,
3115 &sw_desc->flags) ? 2 : 3;
3116
3117 if (index == 0) {
3118 /* 1st slot (RXOR):
3119  * set up the sources region (R1-2-3, R1-2-4
3120  * or R1-2-5)
3121  */
3122 if (test_bit(PPC440SPE_DESC_RXOR12,
3123 &sw_desc->flags))
3124 haddr = DMA_RXOR12 <<
3125 DMA_CUED_REGION_OFF;
3126 else if (test_bit(PPC440SPE_DESC_RXOR123,
3127 &sw_desc->flags))
3128 haddr = DMA_RXOR123 <<
3129 DMA_CUED_REGION_OFF;
3130 else if (test_bit(PPC440SPE_DESC_RXOR124,
3131 &sw_desc->flags))
3132 haddr = DMA_RXOR124 <<
3133 DMA_CUED_REGION_OFF;
3134 else if (test_bit(PPC440SPE_DESC_RXOR125,
3135 &sw_desc->flags))
3136 haddr = DMA_RXOR125 <<
3137 DMA_CUED_REGION_OFF;
3138 else
3139 BUG();
3140 haddr |= DMA_CUED_XOR_BASE;
3141 iter = ppc440spe_get_group_entry(sw_desc, 0);
3142 } else if (index < iskip) {
3143 /* 1st slot (RXOR):
3144  * the source address shall actually be set only once,
3145  * not for each of the first <iskip> indexes
3146  */
3147 iter = NULL;
3148 } else {
3149 /* 2nd/3rd and following slots (WXOR);
3150  * skip the first slot, which carries the RXOR
3151  */
3152 haddr = DMA_CUED_XOR_HB;
3153 iter = ppc440spe_get_group_entry(sw_desc,
3154 index - iskip + sw_desc->dst_cnt);
3155 }
3156 } else {
3157 int znum = 0;
3158
3159 /* WXOR-only operation; skip the first slots, which
3160  * carry the zeroing descriptors
3161  */
3162 if (test_bit(PPC440SPE_ZERO_P, &sw_desc->flags))
3163 znum++;
3164 if (test_bit(PPC440SPE_ZERO_Q, &sw_desc->flags))
3165 znum++;
3166
3167 haddr = DMA_CUED_XOR_HB;
3168 iter = ppc440spe_get_group_entry(sw_desc,
3169 index + znum);
3170 }
3171
3172 if (likely(iter)) {
3173 ppc440spe_desc_set_src_addr(iter, chan, 0, haddr, addr);
3174
3175 if (!index &&
3176 test_bit(PPC440SPE_DESC_RXOR, &sw_desc->flags) &&
3177 sw_desc->dst_cnt == 2) {
3178 /* if we have two destinations for RXOR, then
3179  * set up the source in the second descriptor too
3180  */
3181 iter = ppc440spe_get_group_entry(sw_desc, 1);
3182 ppc440spe_desc_set_src_addr(iter, chan, 0,
3183 haddr, addr);
3184 }
3185 }
3186 break;
3187
3188 case PPC440SPE_XOR_ID:
3189 /* DMA2 may do Biskup */
3190 iter = sw_desc->group_head;
3191 if (iter->dst_cnt == 2) {
3192 /* both P & Q calculations required; set P src here */
3193 ppc440spe_adma_dma2rxor_set_src(iter, index, addr);
3194
3195 /* this is for Q */
3196 iter = ppc440spe_get_group_entry(sw_desc,
3197 sw_desc->descs_per_op);
3198 }
3199 ppc440spe_adma_dma2rxor_set_src(iter, index, addr);
3200 break;
3201 }
3202 }
3203
3204 /**
3205  * ppc440spe_adma_memcpy_xor_set_src - set source address into descriptor
3206  */
3207 static void ppc440spe_adma_memcpy_xor_set_src(
3208 struct ppc440spe_adma_desc_slot *sw_desc,
3209 dma_addr_t addr, int index)
3210 {
3211 struct ppc440spe_adma_chan *chan;
3212
3213 chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
3214 sw_desc = sw_desc->group_head;
3215
3216 if (likely(sw_desc))
3217 ppc440spe_desc_set_src_addr(sw_desc, chan, index, 0, addr);
3218 }
3219
3220 /**
3221  * ppc440spe_adma_dma2rxor_inc_addr - increment the source address cursor
3222  */
3223 static void ppc440spe_adma_dma2rxor_inc_addr(
3224 struct ppc440spe_adma_desc_slot *desc,
3225 struct ppc440spe_rxor *cursor, int index, int src_cnt)
3226 {
3227 cursor->addr_count++;
3228 if (index == src_cnt - 1) {
3229 ppc440spe_desc_set_xor_src_cnt(desc, cursor->addr_count);
3230 } else if (cursor->addr_count == XOR_MAX_OPS) {
3231 ppc440spe_desc_set_xor_src_cnt(desc, cursor->addr_count);
3232 cursor->addr_count = 0;
3233 cursor->desc_count++;
3234 }
3235 }
3236
3237 /**
3238  * ppc440spe_adma_dma2rxor_prep_src - setup RXOR types in a DMA2 CDB
3239  */
3240 static int ppc440spe_adma_dma2rxor_prep_src(
3241 struct ppc440spe_adma_desc_slot *hdesc,
3242 struct ppc440spe_rxor *cursor, int index,
3243 int src_cnt, u32 addr)
3244 {
3245 int rval = 0;
3246 int sign; /* signed so that the -1/+1 direction flag reads cleanly */
3247 struct ppc440spe_adma_desc_slot *desc = hdesc;
3248 int i;
3249
3250 for (i = 0; i < cursor->desc_count; i++) {
3251 desc = list_entry(desc->chain_node.next,
3252 struct ppc440spe_adma_desc_slot,
3253 chain_node);
3254 }
3255
3256 switch (cursor->state) {
3257 case 0:
3258 if (addr == cursor->addrl + cursor->len) {
3259 /* direct RXOR */
3260 cursor->state = 1;
3261 cursor->xor_count++;
3262 if (index == src_cnt-1) {
3263 ppc440spe_rxor_set_region(desc,
3264 cursor->addr_count,
3265 DMA_RXOR12 << DMA_CUED_REGION_OFF);
3266 ppc440spe_adma_dma2rxor_inc_addr(
3267 desc, cursor, index, src_cnt);
3268 }
3269 } else if (cursor->addrl == addr + cursor->len) {
3270 /* reverse RXOR */
3271 cursor->state = 1;
3272 cursor->xor_count++;
3273 set_bit(cursor->addr_count, &desc->reverse_flags[0]);
3274 if (index == src_cnt-1) {
3275 ppc440spe_rxor_set_region(desc,
3276 cursor->addr_count,
3277 DMA_RXOR12 << DMA_CUED_REGION_OFF);
3278 ppc440spe_adma_dma2rxor_inc_addr(
3279 desc, cursor, index, src_cnt);
3280 }
3281 } else {
3282 printk(KERN_ERR "Cannot build "
3283 "DMA2 RXOR command block.\n");
3284 BUG();
3285 }
3286 break;
3287 case 1:
3288 sign = test_bit(cursor->addr_count,
3289 desc->reverse_flags)
3290 ? -1 : 1;
3291 if (index == src_cnt-2 || (sign == -1
3292 && addr != cursor->addrl - 2*cursor->len)) {
3293 cursor->state = 0;
3294 cursor->xor_count = 1;
3295 cursor->addrl = addr;
3296 ppc440spe_rxor_set_region(desc,
3297 cursor->addr_count,
3298 DMA_RXOR12 << DMA_CUED_REGION_OFF);
3299 ppc440spe_adma_dma2rxor_inc_addr(
3300 desc, cursor, index, src_cnt);
3301 } else if (addr == cursor->addrl + 2*sign*cursor->len) {
3302 cursor->state = 2;
3303 cursor->xor_count = 0;
3304 ppc440spe_rxor_set_region(desc,
3305 cursor->addr_count,
3306 DMA_RXOR123 << DMA_CUED_REGION_OFF);
3307 if (index == src_cnt-1) {
3308 ppc440spe_adma_dma2rxor_inc_addr(
3309 desc, cursor, index, src_cnt);
3310 }
3311 } else if (addr == cursor->addrl + 3*cursor->len) {
3312 cursor->state = 2;
3313 cursor->xor_count = 0;
3314 ppc440spe_rxor_set_region(desc,
3315 cursor->addr_count,
3316 DMA_RXOR124 << DMA_CUED_REGION_OFF);
3317 if (index == src_cnt-1) {
3318 ppc440spe_adma_dma2rxor_inc_addr(
3319 desc, cursor, index, src_cnt);
3320 }
3321 } else if (addr == cursor->addrl + 4*cursor->len) {
3322 cursor->state = 2;
3323 cursor->xor_count = 0;
3324 ppc440spe_rxor_set_region(desc,
3325 cursor->addr_count,
3326 DMA_RXOR125 << DMA_CUED_REGION_OFF);
3327 if (index == src_cnt-1) {
3328 ppc440spe_adma_dma2rxor_inc_addr(
3329 desc, cursor, index, src_cnt);
3330 }
3331 } else {
3332 cursor->state = 0;
3333 cursor->xor_count = 1;
3334 cursor->addrl = addr;
3335 ppc440spe_rxor_set_region(desc,
3336 cursor->addr_count,
3337 DMA_RXOR12 << DMA_CUED_REGION_OFF);
3338 ppc440spe_adma_dma2rxor_inc_addr(
3339 desc, cursor, index, src_cnt);
3340 }
3341 break;
3342 case 2:
3343 cursor->state = 0;
3344 cursor->addrl = addr;
3345 cursor->xor_count++;
3346 if (index) {
3347 ppc440spe_adma_dma2rxor_inc_addr(
3348 desc, cursor, index, src_cnt);
3349 }
3350 break;
3351 }
3352
3353 return rval;
3354 }
3355
3356 /**
3357  * ppc440spe_adma_dma2rxor_set_src - set the RXOR source address; it is
3358  *	assumed that ppc440spe_adma_dma2rxor_prep_src() was called earlier
3359  */
3360 static void ppc440spe_adma_dma2rxor_set_src(
3361 struct ppc440spe_adma_desc_slot *desc,
3362 int index, dma_addr_t addr)
3363 {
3364 struct xor_cb *xcb = desc->hw_desc;
3365 int k = 0, op = 0, lop = 0;
3366
3367 /* get the RXOR operand which corresponds to index addr */
3368 while (op <= index) {
3369 lop = op;
3370 if (k == XOR_MAX_OPS) {
3371 k = 0;
3372 desc = list_entry(desc->chain_node.next,
3373 struct ppc440spe_adma_desc_slot, chain_node);
3374 xcb = desc->hw_desc;
3375
3376 }
3377 if ((xcb->ops[k++].h & (DMA_RXOR12 << DMA_CUED_REGION_OFF)) ==
3378 (DMA_RXOR12 << DMA_CUED_REGION_OFF))
3379 op += 2;
3380 else
3381 op += 3;
3382 }
3383
3384 BUG_ON(k < 1);
3385
3386 if (test_bit(k-1, desc->reverse_flags)) {
3387 /* reverse operand order; put last op in RXOR group */
3388 if (index == op - 1)
3389 ppc440spe_rxor_set_src(desc, k - 1, addr);
3390 } else {
3391 /* direct operand order; put first op in RXOR group */
3392 if (index == lop)
3393 ppc440spe_rxor_set_src(desc, k - 1, addr);
3394 }
3395 }
3396
3397 /**
3398  * ppc440spe_adma_dma2rxor_set_mult - set the RXOR multipliers; it is
3399  *	assumed that ppc440spe_adma_dma2rxor_prep_src() was called earlier
3400  */
3401 static void ppc440spe_adma_dma2rxor_set_mult(
3402 struct ppc440spe_adma_desc_slot *desc,
3403 int index, u8 mult)
3404 {
3405 struct xor_cb *xcb = desc->hw_desc;
3406 int k = 0, op = 0, lop = 0;
3407
3408 /* get the RXOR operand which corresponds to index mult */
3409 while (op <= index) {
3410 lop = op;
3411 if (k == XOR_MAX_OPS) {
3412 k = 0;
3413 desc = list_entry(desc->chain_node.next,
3414 struct ppc440spe_adma_desc_slot,
3415 chain_node);
3416 xcb = desc->hw_desc;
3417
3418 }
3419 if ((xcb->ops[k++].h & (DMA_RXOR12 << DMA_CUED_REGION_OFF)) ==
3420 (DMA_RXOR12 << DMA_CUED_REGION_OFF))
3421 op += 2;
3422 else
3423 op += 3;
3424 }
3425
3426 BUG_ON(k < 1);
3427 if (test_bit(k-1, desc->reverse_flags)) {
3428 /* reverse order */
3429 ppc440spe_rxor_set_mult(desc, k - 1, op - index - 1, mult);
3430 } else {
3431 /* direct order */
3432 ppc440spe_rxor_set_mult(desc, k - 1, index - lop, mult);
3433 }
3434 }
3435
3436 /**
3437  * ppc440spe_init_rxor_cursor - initialize the RXOR parsing cursor
3438  */
3439 static void ppc440spe_init_rxor_cursor(struct ppc440spe_rxor *cursor)
3440 {
3441 memset(cursor, 0, sizeof(struct ppc440spe_rxor));
3442 cursor->state = 2;
3443 }
3444
3445 /**
3446  * ppc440spe_adma_pq_set_src_mult - set the multiplication coefficient
3447  * into the descriptor for the PQXOR operation
3448  */
3449 static void ppc440spe_adma_pq_set_src_mult(
3450 struct ppc440spe_adma_desc_slot *sw_desc,
3451 unsigned char mult, int index, int dst_pos)
3452 {
3453 struct ppc440spe_adma_chan *chan;
3454 u32 mult_idx, mult_dst;
3455 struct ppc440spe_adma_desc_slot *iter = NULL, *iter1 = NULL;
3456
3457 chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
3458
3459 switch (chan->device->id) {
3460 case PPC440SPE_DMA0_ID:
3461 case PPC440SPE_DMA1_ID:
3462 if (test_bit(PPC440SPE_DESC_RXOR, &sw_desc->flags)) {
3463 int region = test_bit(PPC440SPE_DESC_RXOR12,
3464 &sw_desc->flags) ? 2 : 3;
3465
3466 if (index < region) {
3467 /* RXOR multipliers */
3468 iter = ppc440spe_get_group_entry(sw_desc,
3469 sw_desc->dst_cnt - 1);
3470 if (sw_desc->dst_cnt == 2)
3471 iter1 = ppc440spe_get_group_entry(
3472 sw_desc, 0);
3473
3474 mult_idx = DMA_CUED_MULT1_OFF + (index << 3);
3475 mult_dst = DMA_CDB_SG_SRC;
3476 } else {
3477 /* WXOR multiplier */
3478 iter = ppc440spe_get_group_entry(sw_desc,
3479 index - region +
3480 sw_desc->dst_cnt);
3481 mult_idx = DMA_CUED_MULT1_OFF;
3482 mult_dst = dst_pos ? DMA_CDB_SG_DST2 :
3483 DMA_CDB_SG_DST1;
3484 }
3485 } else {
3486 int znum = 0;
3487
3488 /* WXOR-only;
3489  * skip the first slots, which carry the destination
3490  * zeroing descriptors (if any)
3491  */
3492 if (test_bit(PPC440SPE_ZERO_P, &sw_desc->flags))
3493 znum++;
3494 if (test_bit(PPC440SPE_ZERO_Q, &sw_desc->flags))
3495 znum++;
3496
3497 iter = ppc440spe_get_group_entry(sw_desc, index + znum);
3498 mult_idx = DMA_CUED_MULT1_OFF;
3499 mult_dst = dst_pos ? DMA_CDB_SG_DST2 : DMA_CDB_SG_DST1;
3500 }
3501
3502 if (likely(iter)) {
3503 ppc440spe_desc_set_src_mult(iter, chan,
3504 mult_idx, mult_dst, mult);
3505
3506 if (unlikely(iter1)) {
3507 /* if we have two destinations for RXOR, we have
3508  * just set the Q mult; set up P now
3509  */
3510 ppc440spe_desc_set_src_mult(iter1, chan,
3511 mult_idx, mult_dst, 1);
3512 }
3513
3514 }
3515 break;
3516
3517 case PPC440SPE_XOR_ID:
3518 iter = sw_desc->group_head;
3519 if (sw_desc->dst_cnt == 2) {
3520 /* both P & Q calculations required; set P mult here */
3521 ppc440spe_adma_dma2rxor_set_mult(iter, index, 1);
3522
3523 /* and then set Q mult */
3524 iter = ppc440spe_get_group_entry(sw_desc,
3525 sw_desc->descs_per_op);
3526 }
3527 ppc440spe_adma_dma2rxor_set_mult(iter, index, mult);
3528 break;
3529 }
3530 }
3531
3532 /**
3533  * ppc440spe_adma_free_chan_resources - free the resources allocated
3534  */
3535 static void ppc440spe_adma_free_chan_resources(struct dma_chan *chan)
3536 {
3537 struct ppc440spe_adma_chan *ppc440spe_chan;
3538 struct ppc440spe_adma_desc_slot *iter, *_iter;
3539 int in_use_descs = 0;
3540
3541 ppc440spe_chan = to_ppc440spe_adma_chan(chan);
3542 ppc440spe_adma_slot_cleanup(ppc440spe_chan);
3543
3544 spin_lock_bh(&ppc440spe_chan->lock);
3545 list_for_each_entry_safe(iter, _iter, &ppc440spe_chan->chain,
3546 chain_node) {
3547 in_use_descs++;
3548 list_del(&iter->chain_node);
3549 }
3550 list_for_each_entry_safe_reverse(iter, _iter,
3551 &ppc440spe_chan->all_slots, slot_node) {
3552 list_del(&iter->slot_node);
3553 kfree(iter);
3554 ppc440spe_chan->slots_allocated--;
3555 }
3556 ppc440spe_chan->last_used = NULL;
3557
3558 dev_dbg(ppc440spe_chan->device->common.dev,
3559 "ppc440spe adma%d %s slots_allocated %d\n",
3560 ppc440spe_chan->device->id,
3561 __func__, ppc440spe_chan->slots_allocated);
3562 spin_unlock_bh(&ppc440spe_chan->lock);
3563
3564 /* one is ok since we left it on there on purpose */
3565 if (in_use_descs > 1)
3566 printk(KERN_ERR "SPE: Freeing %d in use descriptors!\n",
3567 in_use_descs - 1);
3568 }
3569
3570 /**
3571  * ppc440spe_adma_tx_status - poll the status of an ADMA transaction
3572  * @chan: ADMA channel handle
3573  * @cookie: ADMA transaction identifier
3574  * @txstate: holder for the current state of the channel
3575  */
3576 static enum dma_status ppc440spe_adma_tx_status(struct dma_chan *chan,
3577 dma_cookie_t cookie, struct dma_tx_state *txstate)
3578 {
3579 struct ppc440spe_adma_chan *ppc440spe_chan;
3580 enum dma_status ret;
3581
3582 ppc440spe_chan = to_ppc440spe_adma_chan(chan);
3583 ret = dma_cookie_status(chan, cookie, txstate);
3584 if (ret == DMA_COMPLETE)
3585 return ret;
3586
3587 ppc440spe_adma_slot_cleanup(ppc440spe_chan);
3588
3589 return dma_cookie_status(chan, cookie, txstate);
3590 }
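/*
 * Illustrative sketch (not part of the driver): a client waiting on the
 * status routine above through the dmaengine core. dma_sync_wait() polls
 * device_tx_status() (i.e. ppc440spe_adma_tx_status(), which also cleans
 * completed slots) until the cookie completes. The function name is an
 * assumption.
 */
#if 0
static int wait_tx_done_sketch(struct dma_chan *chan, dma_cookie_t cookie)
{
	enum dma_status status = dma_sync_wait(chan, cookie);

	return status == DMA_COMPLETE ? 0 : -EIO;
}
#endif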
3591
3592 /**
3593  * ppc440spe_adma_eot_handler - end of transfer interrupt handler
3594  */
3595 static irqreturn_t ppc440spe_adma_eot_handler(int irq, void *data)
3596 {
3597 struct ppc440spe_adma_chan *chan = data;
3598
3599 dev_dbg(chan->device->common.dev,
3600 "ppc440spe adma%d: %s\n", chan->device->id, __func__);
3601
3602 tasklet_schedule(&chan->irq_tasklet);
3603 ppc440spe_adma_device_clear_eot_status(chan);
3604
3605 return IRQ_HANDLED;
3606 }
3607
3608 /**
3609  * ppc440spe_adma_err_handler - DMA error interrupt handler;
3610  *	does the same things as the EOT handler
3611  */
3612 static irqreturn_t ppc440spe_adma_err_handler(int irq, void *data)
3613 {
3614 struct ppc440spe_adma_chan *chan = data;
3615
3616 dev_dbg(chan->device->common.dev,
3617 "ppc440spe adma%d: %s\n", chan->device->id, __func__);
3618
3619 tasklet_schedule(&chan->irq_tasklet);
3620 ppc440spe_adma_device_clear_eot_status(chan);
3621
3622 return IRQ_HANDLED;
3623 }
3624
3625 /**
3626  * ppc440spe_test_callback - called when the test operation is done
3627  */
3628 static void ppc440spe_test_callback(void *unused)
3629 {
3630 complete(&ppc440spe_r6_test_comp);
3631 }
3632
3633 /**
3634  * ppc440spe_adma_issue_pending - flush all pending descriptors to h/w
3635  */
3636 static void ppc440spe_adma_issue_pending(struct dma_chan *chan)
3637 {
3638 struct ppc440spe_adma_chan *ppc440spe_chan;
3639
3640 ppc440spe_chan = to_ppc440spe_adma_chan(chan);
3641 dev_dbg(ppc440spe_chan->device->common.dev,
3642 "ppc440spe adma%d: %s %d \n", ppc440spe_chan->device->id,
3643 __func__, ppc440spe_chan->pending);
3644
3645 if (ppc440spe_chan->pending) {
3646 ppc440spe_chan->pending = 0;
3647 ppc440spe_chan_append(ppc440spe_chan);
3648 }
3649 }
3650
3651 /**
3652  * ppc440spe_chan_start_null_xor - initiate the first XOR operation (XOR
3653  *	channels use a chain of linked CDBs, so a NULL XOR is submitted
3654  *	first to get the engine running)
3655  */
3656 static void ppc440spe_chan_start_null_xor(struct ppc440spe_adma_chan *chan)
3657 {
3658 struct ppc440spe_adma_desc_slot *sw_desc, *group_start;
3659 dma_cookie_t cookie;
3660 int slot_cnt, slots_per_op;
3661
3662 dev_dbg(chan->device->common.dev,
3663 "ppc440spe adma%d: %s\n", chan->device->id, __func__);
3664
3665 spin_lock_bh(&chan->lock);
3666 slot_cnt = ppc440spe_chan_xor_slot_count(0, 2, &slots_per_op);
3667 sw_desc = ppc440spe_adma_alloc_slots(chan, slot_cnt, slots_per_op);
3668 if (sw_desc) {
3669 group_start = sw_desc->group_head;
3670 list_splice_init(&sw_desc->group_list, &chan->chain);
3671 async_tx_ack(&sw_desc->async_tx);
3672 ppc440spe_desc_init_null_xor(group_start);
3673
3674 cookie = dma_cookie_assign(&sw_desc->async_tx);
3675
3676 /* initialize the completed cookie to be less than
3677  * the most recently used cookie
3678  */
3679 chan->common.completed_cookie = cookie - 1;
3680
3681 /* channel should not be busy */
3682 BUG_ON(ppc440spe_chan_is_busy(chan));
3683
3684 /* set the descriptor address */
3685 ppc440spe_chan_set_first_xor_descriptor(chan, sw_desc);
3686
3687 /* run the descriptor */
3688 ppc440spe_chan_run(chan);
3689 } else
3690 printk(KERN_ERR "ppc440spe adma%d"
3691 " failed to allocate null descriptor\n",
3692 chan->device->id);
3693 spin_unlock_bh(&chan->lock);
3694 }
3695
3696 /**
3697  * ppc440spe_test_raid6 - test whether RAID-6 capabilities were enabled
3698  *	successfully: perform one WXOR operation on a 0xFF-filled page;
3699  *	if the h/w is configured properly, the page is XORed with itself
3700  *	and the result is a zeroed page.
3701  */
3702 static int ppc440spe_test_raid6(struct ppc440spe_adma_chan *chan)
3703 {
3704 struct ppc440spe_adma_desc_slot *sw_desc, *iter;
3705 struct page *pg;
3706 char *a;
3707 dma_addr_t dma_addr, addrs[2];
3708 unsigned long op = 0;
3709 int rval = 0;
3710
3711 set_bit(PPC440SPE_DESC_WXOR, &op);
3712
3713 pg = alloc_page(GFP_KERNEL);
3714 if (!pg)
3715 return -ENOMEM;
3716
3717 spin_lock_bh(&chan->lock);
3718 sw_desc = ppc440spe_adma_alloc_slots(chan, 1, 1);
3719 if (sw_desc) {
3720 /* 1 src, 1 dst, int_en, WXOR */
3721 ppc440spe_desc_init_dma01pq(sw_desc, 1, 1, 1, op);
3722 list_for_each_entry(iter, &sw_desc->group_list, chain_node) {
3723 ppc440spe_desc_set_byte_count(iter, chan, PAGE_SIZE);
3724 iter->unmap_len = PAGE_SIZE;
3725 }
3726 } else {
3727 rval = -EFAULT;
3728 spin_unlock_bh(&chan->lock);
3729 goto exit;
3730 }
3731 spin_unlock_bh(&chan->lock);
3732
3733 /* Fill the test page with ones */
3734 memset(page_address(pg), 0xFF, PAGE_SIZE);
3735 dma_addr = dma_map_page(chan->device->dev, pg, 0,
3736 PAGE_SIZE, DMA_BIDIRECTIONAL);
3737
3738 /* Setup addresses */
3739 ppc440spe_adma_pq_set_src(sw_desc, dma_addr, 0);
3740 ppc440spe_adma_pq_set_src_mult(sw_desc, 1, 0, 0);
3741 addrs[0] = dma_addr;
3742 addrs[1] = 0;
3743 ppc440spe_adma_pq_set_dest(sw_desc, addrs, DMA_PREP_PQ_DISABLE_Q);
3744
3745 async_tx_ack(&sw_desc->async_tx);
3746 sw_desc->async_tx.callback = ppc440spe_test_callback;
3747 sw_desc->async_tx.callback_param = NULL;
3748
3749 init_completion(&ppc440spe_r6_test_comp);
3750
3751 ppc440spe_adma_tx_submit(&sw_desc->async_tx);
3752 ppc440spe_adma_issue_pending(&chan->common);
3753
3754 wait_for_completion(&ppc440spe_r6_test_comp);
3755
3756 /* Now check if the test page is zeroed */
3757 a = page_address(pg);
3758 if ((*(u32 *)a) == 0 && memcmp(a, a+4, PAGE_SIZE-4) == 0) {
3759 /* page is zero - RAID-6 enabled */
3760 rval = 0;
3761 } else {
3762 /* RAID-6 was not enabled */
3763 rval = -EINVAL;
3764 }
3765 exit:
3766 __free_page(pg);
3767 return rval;
3768 }
3769
3770 static void ppc440spe_adma_init_capabilities(struct ppc440spe_adma_device *adev)
3771 {
3772 switch (adev->id) {
3773 case PPC440SPE_DMA0_ID:
3774 case PPC440SPE_DMA1_ID:
3775 dma_cap_set(DMA_MEMCPY, adev->common.cap_mask);
3776 dma_cap_set(DMA_INTERRUPT, adev->common.cap_mask);
3777 dma_cap_set(DMA_PQ, adev->common.cap_mask);
3778 dma_cap_set(DMA_PQ_VAL, adev->common.cap_mask);
3779 dma_cap_set(DMA_XOR_VAL, adev->common.cap_mask);
3780 break;
3781 case PPC440SPE_XOR_ID:
3782 dma_cap_set(DMA_XOR, adev->common.cap_mask);
3783 dma_cap_set(DMA_PQ, adev->common.cap_mask);
3784 dma_cap_set(DMA_INTERRUPT, adev->common.cap_mask);
3785
3786 break;
3787 }
3788
3789 /* Set base routines */
3790 adev->common.device_alloc_chan_resources =
3791 ppc440spe_adma_alloc_chan_resources;
3792 adev->common.device_free_chan_resources =
3793 ppc440spe_adma_free_chan_resources;
3794 adev->common.device_tx_status = ppc440spe_adma_tx_status;
3795 adev->common.device_issue_pending = ppc440spe_adma_issue_pending;
3796
3797 /* Set prep routines based on capability */
3798 if (dma_has_cap(DMA_MEMCPY, adev->common.cap_mask)) {
3799 adev->common.device_prep_dma_memcpy =
3800 ppc440spe_adma_prep_dma_memcpy;
3801 }
3802 if (dma_has_cap(DMA_XOR, adev->common.cap_mask)) {
3803 adev->common.max_xor = XOR_MAX_OPS;
3804 adev->common.device_prep_dma_xor =
3805 ppc440spe_adma_prep_dma_xor;
3806 }
3807 if (dma_has_cap(DMA_PQ, adev->common.cap_mask)) {
3808 switch (adev->id) {
3809 case PPC440SPE_DMA0_ID:
3810 dma_set_maxpq(&adev->common,
3811 DMA0_FIFO_SIZE / sizeof(struct dma_cdb), 0);
3812 break;
3813 case PPC440SPE_DMA1_ID:
3814 dma_set_maxpq(&adev->common,
3815 DMA1_FIFO_SIZE / sizeof(struct dma_cdb), 0);
3816 break;
3817 case PPC440SPE_XOR_ID:
3818 adev->common.max_pq = XOR_MAX_OPS * 3;
3819 break;
3820 }
3821 adev->common.device_prep_dma_pq =
3822 ppc440spe_adma_prep_dma_pq;
3823 }
3824 if (dma_has_cap(DMA_PQ_VAL, adev->common.cap_mask)) {
3825 switch (adev->id) {
3826 case PPC440SPE_DMA0_ID:
3827 adev->common.max_pq = DMA0_FIFO_SIZE /
3828 sizeof(struct dma_cdb);
3829 break;
3830 case PPC440SPE_DMA1_ID:
3831 adev->common.max_pq = DMA1_FIFO_SIZE /
3832 sizeof(struct dma_cdb);
3833 break;
3834 }
3835 adev->common.device_prep_dma_pq_val =
3836 ppc440spe_adma_prep_dma_pqzero_sum;
3837 }
3838 if (dma_has_cap(DMA_XOR_VAL, adev->common.cap_mask)) {
3839 switch (adev->id) {
3840 case PPC440SPE_DMA0_ID:
3841 adev->common.max_xor = DMA0_FIFO_SIZE /
3842 sizeof(struct dma_cdb);
3843 break;
3844 case PPC440SPE_DMA1_ID:
3845 adev->common.max_xor = DMA1_FIFO_SIZE /
3846 sizeof(struct dma_cdb);
3847 break;
3848 }
3849 adev->common.device_prep_dma_xor_val =
3850 ppc440spe_adma_prep_dma_xor_zero_sum;
3851 }
3852 if (dma_has_cap(DMA_INTERRUPT, adev->common.cap_mask)) {
3853 adev->common.device_prep_dma_interrupt =
3854 ppc440spe_adma_prep_dma_interrupt;
3855 }
3856 pr_info("%s: AMCC(R) PPC440SP(E) ADMA Engine: "
3857 "( %s%s%s%s%s%s)\n",
3858 dev_name(adev->dev),
3859 dma_has_cap(DMA_PQ, adev->common.cap_mask) ? "pq " : "",
3860 dma_has_cap(DMA_PQ_VAL, adev->common.cap_mask) ? "pq_val " : "",
3861 dma_has_cap(DMA_XOR, adev->common.cap_mask) ? "xor " : "",
3862 dma_has_cap(DMA_XOR_VAL, adev->common.cap_mask) ? "xor_val " : "",
3863 dma_has_cap(DMA_MEMCPY, adev->common.cap_mask) ? "memcpy " : "",
3864 dma_has_cap(DMA_INTERRUPT, adev->common.cap_mask) ? "intr " : "");
3865 }
3866
3867 static int ppc440spe_adma_setup_irqs(struct ppc440spe_adma_device *adev,
3868 struct ppc440spe_adma_chan *chan,
3869 int *initcode)
3870 {
3871 struct platform_device *ofdev;
3872 struct device_node *np;
3873 int ret;
3874
3875 ofdev = container_of(adev->dev, struct platform_device, dev);
3876 np = ofdev->dev.of_node;
3877 if (adev->id != PPC440SPE_XOR_ID) {
3878 adev->err_irq = irq_of_parse_and_map(np, 1);
3879 if (!adev->err_irq) {
3880 dev_warn(adev->dev, "no err irq resource?\n");
3881 *initcode = PPC_ADMA_INIT_IRQ2;
3882 adev->err_irq = -ENXIO;
3883 } else
3884 atomic_inc(&ppc440spe_adma_err_irq_ref);
3885 } else {
3886 adev->err_irq = -ENXIO;
3887 }
3888
3889 adev->irq = irq_of_parse_and_map(np, 0);
3890 if (!adev->irq) {
3891 dev_err(adev->dev, "no irq resource\n");
3892 *initcode = PPC_ADMA_INIT_IRQ1;
3893 ret = -ENXIO;
3894 goto err_irq_map;
3895 }
3896 dev_dbg(adev->dev, "irq %d, err irq %d\n",
3897 adev->irq, adev->err_irq);
3898
3899 ret = request_irq(adev->irq, ppc440spe_adma_eot_handler,
3900 0, dev_driver_string(adev->dev), chan);
3901 if (ret) {
3902 dev_err(adev->dev, "can't request irq %d\n",
3903 adev->irq);
3904 *initcode = PPC_ADMA_INIT_IRQ1;
3905 ret = -EIO;
3906 goto err_req1;
3907 }
3908
3909 /* only DMA engines have a separate error IRQ,
3910  * so it's OK if err_irq < 0 in the XOR engine case
3911  */
3912 if (adev->err_irq > 0) {
3913 /* both DMA engines share the common error IRQ */
3914 ret = request_irq(adev->err_irq,
3915 ppc440spe_adma_err_handler,
3916 IRQF_SHARED,
3917 dev_driver_string(adev->dev),
3918 chan);
3919 if (ret) {
3920 dev_err(adev->dev, "can't request irq %d\n",
3921 adev->err_irq);
3922 *initcode = PPC_ADMA_INIT_IRQ2;
3923 ret = -EIO;
3924 goto err_req2;
3925 }
3926 }
3927
3928 if (adev->id == PPC440SPE_XOR_ID) {
3929 /* enable XOR engine interrupts */
3930 iowrite32be(XOR_IE_CBCIE_BIT | XOR_IE_ICBIE_BIT |
3931 XOR_IE_ICIE_BIT | XOR_IE_RPTIE_BIT,
3932 &adev->xor_reg->ier);
3933 } else {
3934 u32 mask, enable;
3935
3936 np = of_find_compatible_node(NULL, NULL, "ibm,i2o-440spe");
3937 if (!np) {
3938 pr_err("%s: can't find I2O device tree node\n",
3939 __func__);
3940 ret = -ENODEV;
3941 goto err_req2;
3942 }
3943 adev->i2o_reg = of_iomap(np, 0);
3944 if (!adev->i2o_reg) {
3945 pr_err("%s: failed to map I2O registers\n", __func__);
3946 of_node_put(np);
3947 ret = -EINVAL;
3948 goto err_req2;
3949 }
3950 of_node_put(np);
3951
3952 /* Unmask 'CS FIFO Attention' interrupts and enable
3953  * generating interrupts on errors */
3954 enable = (adev->id == PPC440SPE_DMA0_ID) ?
3955 ~(I2O_IOPIM_P0SNE | I2O_IOPIM_P0EM) :
3956 ~(I2O_IOPIM_P1SNE | I2O_IOPIM_P1EM);
3957 mask = ioread32(&adev->i2o_reg->iopim) & enable;
3958 iowrite32(mask, &adev->i2o_reg->iopim);
3959 }
3960 return 0;
3961
3962 err_req2:
3963 free_irq(adev->irq, chan);
3964 err_req1:
3965 irq_dispose_mapping(adev->irq);
3966 err_irq_map:
3967 if (adev->err_irq > 0) {
3968 if (atomic_dec_and_test(&ppc440spe_adma_err_irq_ref))
3969 irq_dispose_mapping(adev->err_irq);
3970 }
3971 return ret;
3972 }
3973
3974 static void ppc440spe_adma_release_irqs(struct ppc440spe_adma_device *adev,
3975 struct ppc440spe_adma_chan *chan)
3976 {
3977 u32 mask, disable;
3978
3979 if (adev->id == PPC440SPE_XOR_ID) {
3980 /* disable XOR engine interrupts */
3981 mask = ioread32be(&adev->xor_reg->ier);
3982 mask &= ~(XOR_IE_CBCIE_BIT | XOR_IE_ICBIE_BIT |
3983 XOR_IE_ICIE_BIT | XOR_IE_RPTIE_BIT);
3984 iowrite32be(mask, &adev->xor_reg->ier);
3985 } else {
3986 /* disable DMAx engine interrupts */
3987 disable = (adev->id == PPC440SPE_DMA0_ID) ?
3988 (I2O_IOPIM_P0SNE | I2O_IOPIM_P0EM) :
3989 (I2O_IOPIM_P1SNE | I2O_IOPIM_P1EM);
3990 mask = ioread32(&adev->i2o_reg->iopim) | disable;
3991 iowrite32(mask, &adev->i2o_reg->iopim);
3992 }
3993 free_irq(adev->irq, chan);
3994 irq_dispose_mapping(adev->irq);
3995 if (adev->err_irq > 0) {
3996 free_irq(adev->err_irq, chan);
3997 if (atomic_dec_and_test(&ppc440spe_adma_err_irq_ref)) {
3998 irq_dispose_mapping(adev->err_irq);
3999 iounmap(adev->i2o_reg);
4000 }
4001 }
4002 }
4003
4004 /**
4005  * ppc440spe_adma_probe - probe the asynch device
4006  */
4007 static int ppc440spe_adma_probe(struct platform_device *ofdev)
4008 {
4009 struct device_node *np = ofdev->dev.of_node;
4010 struct resource res;
4011 struct ppc440spe_adma_device *adev;
4012 struct ppc440spe_adma_chan *chan;
4013 struct ppc_dma_chan_ref *ref, *_ref;
4014 int ret = 0, initcode = PPC_ADMA_INIT_OK;
4015 const u32 *idx;
4016 int len;
4017 void *regs;
4018 u32 id, pool_size;
4019
4020 if (of_device_is_compatible(np, "amcc,xor-accelerator")) {
4021 id = PPC440SPE_XOR_ID;
4022 /* The XOR engine does not use FIFOs but a linked list of
4023  * CDBs, so there is no dependency between the pool size to
4024  * allocate and the engine configuration.
4025  */
4026 pool_size = PAGE_SIZE << 1;
4027 } else {
4028 /* it is DMA0 or DMA1 */
4029 idx = of_get_property(np, "cell-index", &len);
4030 if (!idx || (len != sizeof(u32))) {
4031 dev_err(&ofdev->dev, "Device node %pOF has missing "
4032 "or invalid cell-index property\n",
4033 np);
4034 return -EINVAL;
4035 }
4036 id = *idx;
4037 /* DMA0/1 engines use a FIFO to maintain CDB pointers, so the
4038  * pool should be allocated according to the size of this FIFO.
4039  * The pool size thus depends on the FIFO depth: how many CDB
4040  * pointers the FIFO may contain determines how many CDBs we
4041  * should allocate. This rules out the case where the FIFO is
4042  * full but a CDB is not yet processed and the descriptor in
4043  * the pool therefore cannot be reused (each CDB pointer in
4044  * the FIFO points to a descriptor in the pool).
4045  * The pool is sized to four times the FIFO size to provide
4046  * additional queueing room.
4047  */
4048 pool_size = (id == PPC440SPE_DMA0_ID) ?
4049 DMA0_FIFO_SIZE : DMA1_FIFO_SIZE;
4050 pool_size <<= 2;
4051 }
4052
4053 if (of_address_to_resource(np, 0, &res)) {
4054 dev_err(&ofdev->dev, "failed to get memory resource\n");
4055 initcode = PPC_ADMA_INIT_MEMRES;
4056 ret = -ENODEV;
4057 goto out;
4058 }
4059
4060 if (!request_mem_region(res.start, resource_size(&res),
4061 dev_driver_string(&ofdev->dev))) {
4062 dev_err(&ofdev->dev, "failed to request memory region %pR\n",
4063 &res);
4064 initcode = PPC_ADMA_INIT_MEMREG;
4065 ret = -EBUSY;
4066 goto out;
4067 }
4068
4069 /* create a device */
4070 adev = kzalloc(sizeof(*adev), GFP_KERNEL);
4071 if (!adev) {
4072 initcode = PPC_ADMA_INIT_ALLOC;
4073 ret = -ENOMEM;
4074 goto err_adev_alloc;
4075 }
4076
4077 adev->id = id;
4078 adev->pool_size = pool_size;
4079
4080 adev->dma_desc_pool_virt = dma_alloc_coherent(&ofdev->dev,
4081 adev->pool_size, &adev->dma_desc_pool,
4082 GFP_KERNEL);
4083 if (adev->dma_desc_pool_virt == NULL) {
4084 dev_err(&ofdev->dev, "failed to allocate %d bytes of coherent "
4085 "memory for hardware descriptors\n",
4086 adev->pool_size);
4087 initcode = PPC_ADMA_INIT_COHERENT;
4088 ret = -ENOMEM;
4089 goto err_dma_alloc;
4090 }
4091 dev_dbg(&ofdev->dev, "allocated descriptor pool virt 0x%p phys 0x%llx\n",
4092 adev->dma_desc_pool_virt, (u64)adev->dma_desc_pool);
4093
4094 regs = ioremap(res.start, resource_size(&res));
4095 if (!regs) {
4096 dev_err(&ofdev->dev, "failed to ioremap regs!\n");
4097 ret = -ENOMEM;
4098 goto err_regs_alloc;
4099 }
4100
4101 if (adev->id == PPC440SPE_XOR_ID) {
4102 adev->xor_reg = regs;
4103 /* Reset the XOR engine */
4104 iowrite32be(XOR_CRSR_XASR_BIT, &adev->xor_reg->crsr);
4105 iowrite32be(XOR_CRSR_64BA_BIT, &adev->xor_reg->crrr);
4106 } else {
4107 size_t fifo_size = (adev->id == PPC440SPE_DMA0_ID) ?
4108 DMA0_FIFO_SIZE : DMA1_FIFO_SIZE;
4109 adev->dma_reg = regs;
4110 /* DMAx_FIFO_SIZE is defined in bytes,
4111  * <fsiz> is defined in number of CDB pointers (8 bytes each).
4112  * DMA FIFO Length = CSlength + CPlength, where
4113  * CSlength = CPlength = (fsiz + 1) * 8.
4114  */
4115 iowrite32(DMA_FIFO_ENABLE | ((fifo_size >> 3) - 2),
4116 &adev->dma_reg->fsiz);
4117 /* Configure DMA engine */
4118 iowrite32(DMA_CFG_DXEPR_HP | DMA_CFG_DFMPP_HP | DMA_CFG_FALGN,
4119 &adev->dma_reg->cfg);
4120 /* Clear status */
4121 iowrite32(~0, &adev->dma_reg->dsts);
4122 }
4123
4124 adev->dev = &ofdev->dev;
4125 adev->common.dev = &ofdev->dev;
4126 INIT_LIST_HEAD(&adev->common.channels);
4127 platform_set_drvdata(ofdev, adev);
4128
4129 /* create a channel */
4130 chan = kzalloc(sizeof(*chan), GFP_KERNEL);
4131 if (!chan) {
4132 initcode = PPC_ADMA_INIT_CHANNEL;
4133 ret = -ENOMEM;
4134 goto err_chan_alloc;
4135 }
4136
4137 spin_lock_init(&chan->lock);
4138 INIT_LIST_HEAD(&chan->chain);
4139 INIT_LIST_HEAD(&chan->all_slots);
4140 chan->device = adev;
4141 chan->common.device = &adev->common;
4142 dma_cookie_init(&chan->common);
4143 list_add_tail(&chan->common.device_node, &adev->common.channels);
4144 tasklet_init(&chan->irq_tasklet, ppc440spe_adma_tasklet,
4145 (unsigned long)chan);
4146
4147 /* allocate and map helper pages for async validation or
4148  * async_mult/async_sum_product operations on DMA0/1
4149  */
4150 if (adev->id != PPC440SPE_XOR_ID) {
4151 chan->pdest_page = alloc_page(GFP_KERNEL);
4152 chan->qdest_page = alloc_page(GFP_KERNEL);
4153 if (!chan->pdest_page ||
4154 !chan->qdest_page) {
4155 if (chan->pdest_page)
4156 __free_page(chan->pdest_page);
4157 if (chan->qdest_page)
4158 __free_page(chan->qdest_page);
4159 ret = -ENOMEM;
4160 goto err_page_alloc;
4161 }
4162 chan->pdest = dma_map_page(&ofdev->dev, chan->pdest_page, 0,
4163 PAGE_SIZE, DMA_BIDIRECTIONAL);
4164 chan->qdest = dma_map_page(&ofdev->dev, chan->qdest_page, 0,
4165 PAGE_SIZE, DMA_BIDIRECTIONAL);
4166 }
4167
4168 ref = kmalloc(sizeof(*ref), GFP_KERNEL);
4169 if (ref) {
4170 ref->chan = &chan->common;
4171 INIT_LIST_HEAD(&ref->node);
4172 list_add_tail(&ref->node, &ppc440spe_adma_chan_list);
4173 } else {
4174 dev_err(&ofdev->dev, "failed to allocate channel reference!\n");
4175 ret = -ENOMEM;
4176 goto err_ref_alloc;
4177 }
4178
4179 ret = ppc440spe_adma_setup_irqs(adev, chan, &initcode);
4180 if (ret)
4181 goto err_irq;
4182
4183 ppc440spe_adma_init_capabilities(adev);
4184
4185 ret = dma_async_device_register(&adev->common);
4186 if (ret) {
4187 initcode = PPC_ADMA_INIT_REGISTER;
4188 dev_err(&ofdev->dev, "failed to register dma device\n");
4189 goto err_dev_reg;
4190 }
4191
4192 goto out;
4193
4194 err_dev_reg:
4195 ppc440spe_adma_release_irqs(adev, chan);
4196 err_irq:
4197 list_for_each_entry_safe(ref, _ref, &ppc440spe_adma_chan_list, node) {
4198 if (chan == to_ppc440spe_adma_chan(ref->chan)) {
4199 list_del(&ref->node);
4200 kfree(ref);
4201 }
4202 }
4203 err_ref_alloc:
4204 if (adev->id != PPC440SPE_XOR_ID) {
4205 dma_unmap_page(&ofdev->dev, chan->pdest,
4206 PAGE_SIZE, DMA_BIDIRECTIONAL);
4207 dma_unmap_page(&ofdev->dev, chan->qdest,
4208 PAGE_SIZE, DMA_BIDIRECTIONAL);
4209 __free_page(chan->pdest_page);
4210 __free_page(chan->qdest_page);
4211 }
4212 err_page_alloc:
4213 kfree(chan);
4214 err_chan_alloc:
4215 if (adev->id == PPC440SPE_XOR_ID)
4216 iounmap(adev->xor_reg);
4217 else
4218 iounmap(adev->dma_reg);
4219 err_regs_alloc:
4220 dma_free_coherent(adev->dev, adev->pool_size,
4221 adev->dma_desc_pool_virt,
4222 adev->dma_desc_pool);
4223 err_dma_alloc:
4224 kfree(adev);
4225 err_adev_alloc:
4226 release_mem_region(res.start, resource_size(&res));
4227 out:
4228 if (id < PPC440SPE_ADMA_ENGINES_NUM)
4229 ppc440spe_adma_devices[id] = initcode;
4230
4231 return ret;
4232 }
4233
4234 /**
4235  * ppc440spe_adma_remove - remove the asynch device
4236  */
4237 static int ppc440spe_adma_remove(struct platform_device *ofdev)
4238 {
4239 struct ppc440spe_adma_device *adev = platform_get_drvdata(ofdev);
4240 struct device_node *np = ofdev->dev.of_node;
4241 struct resource res;
4242 struct dma_chan *chan, *_chan;
4243 struct ppc_dma_chan_ref *ref, *_ref;
4244 struct ppc440spe_adma_chan *ppc440spe_chan;
4245
4246 if (adev->id < PPC440SPE_ADMA_ENGINES_NUM)
4247 ppc440spe_adma_devices[adev->id] = -1;
4248
4249 dma_async_device_unregister(&adev->common);
4250
4251 list_for_each_entry_safe(chan, _chan, &adev->common.channels,
4252 device_node) {
4253 ppc440spe_chan = to_ppc440spe_adma_chan(chan);
4254 ppc440spe_adma_release_irqs(adev, ppc440spe_chan);
4255 tasklet_kill(&ppc440spe_chan->irq_tasklet);
4256 if (adev->id != PPC440SPE_XOR_ID) {
4257 dma_unmap_page(&ofdev->dev, ppc440spe_chan->pdest,
4258 PAGE_SIZE, DMA_BIDIRECTIONAL);
4259 dma_unmap_page(&ofdev->dev, ppc440spe_chan->qdest,
4260 PAGE_SIZE, DMA_BIDIRECTIONAL);
4261 __free_page(ppc440spe_chan->pdest_page);
4262 __free_page(ppc440spe_chan->qdest_page);
4263 }
4264 list_for_each_entry_safe(ref, _ref, &ppc440spe_adma_chan_list,
4265 node) {
4266 if (ppc440spe_chan ==
4267 to_ppc440spe_adma_chan(ref->chan)) {
4268 list_del(&ref->node);
4269 kfree(ref);
4270 }
4271 }
4272 list_del(&chan->device_node);
4273 kfree(ppc440spe_chan);
4274 }
4275
4276 dma_free_coherent(adev->dev, adev->pool_size,
4277 adev->dma_desc_pool_virt, adev->dma_desc_pool);
4278 if (adev->id == PPC440SPE_XOR_ID)
4279 iounmap(adev->xor_reg);
4280 else
4281 iounmap(adev->dma_reg);
4282 of_address_to_resource(np, 0, &res);
4283 release_mem_region(res.start, resource_size(&res));
4284 kfree(adev);
4285 return 0;
4286 }
4287
4288 /*
4289  * /sys driver interface to enable h/w RAID-6 capabilities
4290  * Files created in e.g. /sys/devices/plb.0/400100100.dma0/driver/
4291  * directory are "devices", "enable" and "poly".
4292  * "devices" shows the available engines.
4293  * "enable" is used to enable RAID-6 capabilities or to check
4294  * whether these capabilities are enabled.
4295  * "poly" allows setting/checking the used polynomial (PPC440SPe only).
4296  */
4297
4298 static ssize_t devices_show(struct device_driver *dev, char *buf)
4299 {
4300 ssize_t size = 0;
4301 int i;
4302
4303 for (i = 0; i < PPC440SPE_ADMA_ENGINES_NUM; i++) {
4304 if (ppc440spe_adma_devices[i] == -1)
4305 continue;
4306 size += snprintf(buf + size, PAGE_SIZE - size,
4307 "PPC440SP(E)-ADMA.%d: %s\n", i,
4308 ppc_adma_errors[ppc440spe_adma_devices[i]]);
4309 }
4310 return size;
4311 }
4312 static DRIVER_ATTR_RO(devices);
4313
4314 static ssize_t enable_show(struct device_driver *dev, char *buf)
4315 {
4316 return snprintf(buf, PAGE_SIZE,
4317 "PPC440SP(e) RAID-6 capabilities are %sABLED.\n",
4318 ppc440spe_r6_enabled ? "EN" : "DIS");
4319 }
4320
4321 static ssize_t enable_store(struct device_driver *dev, const char *buf,
4322 size_t count)
4323 {
4324 unsigned long val;
4325
4326 if (!count || count > 11)
4327 return -EINVAL;
4328
4329 if (!ppc440spe_r6_tchan)
4330 return -EFAULT;
4331
4332 if (sscanf(buf, "%lx", &val) != 1)	/* write a key */
4333 	return -EINVAL;
4334 dcr_write(ppc440spe_mq_dcr_host, DCRN_MQ0_XORBA, val);
4335 isync();
4336
4337 /* Verify whether it really works now */
4338 if (ppc440spe_test_raid6(ppc440spe_r6_tchan) == 0) {
4339 pr_info("PPC440SP(e) RAID-6 has been activated "
4340 "successfully\n");
4341 ppc440spe_r6_enabled = 1;
4342 } else {
4343 pr_info("PPC440SP(e) RAID-6 hasn't been activated!"
4344 " Error key ?\n");
4345 ppc440spe_r6_enabled = 0;
4346 }
4347 return count;
4348 }
4349 static DRIVER_ATTR_RW(enable);
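/*
 * Usage sketch (illustration; the exact sysfs path is an assumption and
 * depends on the platform). With the driver registered under the name
 * "PPC440SP(E)-ADMA":
 *
 *   cat  /sys/bus/platform/drivers/PPC440SP(E)-ADMA/devices
 *   echo <xor-base-address-key> > \
 *        /sys/bus/platform/drivers/PPC440SP(E)-ADMA/enable
 *   cat  /sys/bus/platform/drivers/PPC440SP(E)-ADMA/enable
 */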
4350
4351 static ssize_t poly_show(struct device_driver *dev, char *buf)
4352 {
4353 ssize_t size = 0;
4354 u32 reg;
4355
4356 #ifdef CONFIG_440SP
4357 /* 440SP has a fixed polynomial */
4358 reg = 0x4d;
4359 #else
4360 reg = dcr_read(ppc440spe_mq_dcr_host, DCRN_MQ0_CFBHL);
4361 reg >>= MQ0_CFBHL_POLY;
4362 reg &= 0xFF;
4363 #endif
4364
4365 size = snprintf(buf, PAGE_SIZE, "PPC440SP(e) RAID-6 driver "
4366 "uses 0x1%02x polynomial.\n", reg);
4367 return size;
4368 }
4369
4370 static ssize_t poly_store(struct device_driver *dev, const char *buf,
4371 size_t count)
4372 {
4373 unsigned long reg, val;
4374
4375 #ifdef CONFIG_440SP
4376 /* 440SP uses the default 0x14D polynomial only */
4377 return -EINVAL;
4378 #endif
4379
4380 if (!count || count > 6)
4381 return -EINVAL;
4382
4383 if (sscanf(buf, "%lx", &val) != 1)	/* e.g. 0x14D or 0x11D */
4384 	return -EINVAL;
4385
4386 if (val & ~0x1FF)
4387 return -EINVAL;
4388
4389 val &= 0xFF;
4390 reg = dcr_read(ppc440spe_mq_dcr_host, DCRN_MQ0_CFBHL);
4391 reg &= ~(0xFF << MQ0_CFBHL_POLY);
4392 reg |= val << MQ0_CFBHL_POLY;
4393 dcr_write(ppc440spe_mq_dcr_host, DCRN_MQ0_CFBHL, reg);
4394
4395 return count;
4396 }
4397 static DRIVER_ATTR_RW(poly);
4398
4399 /*
4400  * Common initialisation for RAID engines: allocate memory for
4401  * DMAx FIFOs and perform the configuration common to all DMA engines.
4402  * Further DMA-engine-specific configuration is done at probe time.
4403  */
4404 static int ppc440spe_configure_raid_devices(void)
4405 {
4406 struct device_node *np;
4407 struct resource i2o_res;
4408 struct i2o_regs __iomem *i2o_reg;
4409 dcr_host_t i2o_dcr_host;
4410 unsigned int dcr_base, dcr_len;
4411 int i, ret;
4412
4413 np = of_find_compatible_node(NULL, NULL, "ibm,i2o-440spe");
4414 if (!np) {
4415 pr_err("%s: can't find I2O device tree node\n",
4416 __func__);
4417 return -ENODEV;
4418 }
4419
4420 if (of_address_to_resource(np, 0, &i2o_res)) {
4421 of_node_put(np);
4422 return -EINVAL;
4423 }
4424
4425 i2o_reg = of_iomap(np, 0);
4426 if (!i2o_reg) {
4427 pr_err("%s: failed to map I2O registers\n", __func__);
4428 of_node_put(np);
4429 return -EINVAL;
4430 }
4431
4432 /* Get I2O DCRs base */
4433 dcr_base = dcr_resource_start(np, 0);
4434 dcr_len = dcr_resource_len(np, 0);
4435 if (!dcr_base && !dcr_len) {
4436 pr_err("%pOF: can't get DCR registers base/len!\n", np);
4437 of_node_put(np);
4438 iounmap(i2o_reg);
4439 return -ENODEV;
4440 }
4441
4442 i2o_dcr_host = dcr_map(np, dcr_base, dcr_len);
4443 if (!DCR_MAP_OK(i2o_dcr_host)) {
4444 pr_err("%pOF: failed to map DCRs!\n", np);
4445 of_node_put(np);
4446 iounmap(i2o_reg);
4447 return -ENODEV;
4448 }
4449 of_node_put(np);
4450
4451 /* Provide memory regions for the DMA FIFOs: I2O, DMA0 and DMA1
4452  * share the base address of the FIFO memory space.
4453  * We actually need twice the physical memory programmed into the
4454  * <fsiz> register, because each DMA uses two FIFOs: CP and CS.
4455  */
4456 ppc440spe_dma_fifo_buf = kmalloc((DMA0_FIFO_SIZE + DMA1_FIFO_SIZE) << 1,
4457 GFP_KERNEL);
4458 if (!ppc440spe_dma_fifo_buf) {
4459 pr_err("%s: DMA FIFO buffer allocation failed.\n", __func__);
4460 iounmap(i2o_reg);
4461 dcr_unmap(i2o_dcr_host, dcr_len);
4462 return -ENOMEM;
4463 }
4464
4465 /*
4466  * Configure h/w
4467  */
4468 /* Reset I2O/DMA */
4469 mtdcri(SDR0, DCRN_SDR0_SRST, DCRN_SDR0_SRST_I2ODMA);
4470 mtdcri(SDR0, DCRN_SDR0_SRST, 0);
4471
4472 /* Setup the base address of the mmapped registers */
4473 dcr_write(i2o_dcr_host, DCRN_I2O0_IBAH, (u32)(i2o_res.start >> 32));
4474 dcr_write(i2o_dcr_host, DCRN_I2O0_IBAL, (u32)(i2o_res.start) |
4475 I2O_REG_ENABLE);
4476 dcr_unmap(i2o_dcr_host, dcr_len);
4477
4478 /* Setup the FIFO memory space base address */
4479 iowrite32(0, &i2o_reg->ifbah);
4480 iowrite32(((u32)__pa(ppc440spe_dma_fifo_buf)), &i2o_reg->ifbal);
4481
4482 /* set zero FIFO size for I2O, so the whole
4483  * ppc440spe_dma_fifo_buf is used by the DMAs.
4484  * The DMAx FIFOs are configured at probe time.
4485  */
4486 iowrite32(0, &i2o_reg->ifsiz);
4487 iounmap(i2o_reg);
4488
4489 /* To prepare the WXOR/RXOR functionality we need access to the
4490  * Memory Queue Module DCRs (it is finally enabled via the /sys
4491  * interface of the ppc440spe ADMA driver).
4492  */
4493 np = of_find_compatible_node(NULL, NULL, "ibm,mq-440spe");
4494 if (!np) {
4495 pr_err("%s: can't find MQ device tree node\n",
4496 __func__);
4497 ret = -ENODEV;
4498 goto out_free;
4499 }
4500
4501 /* Get MQ DCRs base */
4502 dcr_base = dcr_resource_start(np, 0);
4503 dcr_len = dcr_resource_len(np, 0);
4504 if (!dcr_base && !dcr_len) {
4505 pr_err("%pOF: can't get DCR registers base/len!\n", np);
4506 ret = -ENODEV;
4507 goto out_mq;
4508 }
4509
4510 ppc440spe_mq_dcr_host = dcr_map(np, dcr_base, dcr_len);
4511 if (!DCR_MAP_OK(ppc440spe_mq_dcr_host)) {
4512 pr_err("%pOF: failed to map DCRs!\n", np);
4513 ret = -ENODEV;
4514 goto out_mq;
4515 }
4516 of_node_put(np);
4517 ppc440spe_mq_dcr_len = dcr_len;
4518
4519 /* Set HB alias */
4520 dcr_write(ppc440spe_mq_dcr_host, DCRN_MQ0_BAUH, DMA_CUED_XOR_HB);
4521
4522 /* Set:
4523  * - LL transaction passing limit to 1;
4524  * - Memory controller cycle limit to 1;
4525  * - Galois Polynomial to 0x14d (default)
4526  */
4527 dcr_write(ppc440spe_mq_dcr_host, DCRN_MQ0_CFBHL,
4528 (1 << MQ0_CFBHL_TPLM) | (1 << MQ0_CFBHL_HBCL) |
4529 (PPC440SPE_DEFAULT_POLY << MQ0_CFBHL_POLY));
4530
4531 atomic_set(&ppc440spe_adma_err_irq_ref, 0);
4532 for (i = 0; i < PPC440SPE_ADMA_ENGINES_NUM; i++)
4533 ppc440spe_adma_devices[i] = -1;
4534
4535 return 0;
4536
4537 out_mq:
4538 of_node_put(np);
4539 out_free:
4540 kfree(ppc440spe_dma_fifo_buf);
4541 return ret;
4542 }
4543
4544 static const struct of_device_id ppc440spe_adma_of_match[] = {
4545 { .compatible = "ibm,dma-440spe", },
4546 { .compatible = "amcc,xor-accelerator", },
4547 {},
4548 };
4549 MODULE_DEVICE_TABLE(of, ppc440spe_adma_of_match);
4550
4551 static struct platform_driver ppc440spe_adma_driver = {
4552 .probe = ppc440spe_adma_probe,
4553 .remove = ppc440spe_adma_remove,
4554 .driver = {
4555 .name = "PPC440SP(E)-ADMA",
4556 .of_match_table = ppc440spe_adma_of_match,
4557 },
4558 };
4559
4560 static __init int ppc440spe_adma_init(void)
4561 {
4562 int ret;
4563
4564 ret = ppc440spe_configure_raid_devices();
4565 if (ret)
4566 return ret;
4567
4568 ret = platform_driver_register(&ppc440spe_adma_driver);
4569 if (ret) {
4570 pr_err("%s: failed to register platform driver\n",
4571 __func__);
4572 goto out_reg;
4573 }
4574
4575 /* Initialization status */
4576 ret = driver_create_file(&ppc440spe_adma_driver.driver,
4577 &driver_attr_devices);
4578 if (ret)
4579 goto out_dev;
4580
4581 /* RAID-6 h/w enable entry */
4582 ret = driver_create_file(&ppc440spe_adma_driver.driver,
4583 &driver_attr_enable);
4584 if (ret)
4585 goto out_en;
4586
4587 /* GF polynomial to use */
4588 ret = driver_create_file(&ppc440spe_adma_driver.driver,
4589 &driver_attr_poly);
4590 if (!ret)
4591 return ret;
4592
4593 driver_remove_file(&ppc440spe_adma_driver.driver,
4594 &driver_attr_enable);
4595 out_en:
4596 driver_remove_file(&ppc440spe_adma_driver.driver,
4597 &driver_attr_devices);
4598 out_dev:
4599 /* User will not be able to enable h/w RAID-6 */
4600 pr_err("%s: failed to create RAID-6 driver interface\n",
4601 __func__);
4602 platform_driver_unregister(&ppc440spe_adma_driver);
4603 out_reg:
4604 dcr_unmap(ppc440spe_mq_dcr_host, ppc440spe_mq_dcr_len);
4605 kfree(ppc440spe_dma_fifo_buf);
4606 return ret;
4607 }
4608
4609 static void __exit ppc440spe_adma_exit(void)
4610 {
4611 driver_remove_file(&ppc440spe_adma_driver.driver,
4612 &driver_attr_poly);
4613 driver_remove_file(&ppc440spe_adma_driver.driver,
4614 &driver_attr_enable);
4615 driver_remove_file(&ppc440spe_adma_driver.driver,
4616 &driver_attr_devices);
4617 platform_driver_unregister(&ppc440spe_adma_driver);
4618 dcr_unmap(ppc440spe_mq_dcr_host, ppc440spe_mq_dcr_len);
4619 kfree(ppc440spe_dma_fifo_buf);
4620 }
4621
4622 arch_initcall(ppc440spe_adma_init);
4623 module_exit(ppc440spe_adma_exit);
4624
4625 MODULE_AUTHOR("Yuri Tikhonov <yur@emcraft.com>");
4626 MODULE_DESCRIPTION("PPC440SPE ADMA Engine Driver");
4627 MODULE_LICENSE("GPL");