/*
 * offload engine driver for the Intel Xscale series of i/o processors
 * Copyright © 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */

/*
 * This driver supports the asynchronous DMA copy and RAID engines available
 * on the Intel Xscale(R) family of I/O Processors (IOP 32x, 33x, 134x)
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/memory.h>
#include <linux/ioport.h>
#include <linux/raid/pq.h>
#include <linux/slab.h>

#include <mach/adma.h>

#include "dmaengine.h"

#define to_iop_adma_chan(chan) container_of(chan, struct iop_adma_chan, common)
#define to_iop_adma_device(dev) \
	container_of(dev, struct iop_adma_device, common)
#define tx_to_iop_adma_slot(tx) \
	container_of(tx, struct iop_adma_desc_slot, async_tx)

/**
 * iop_adma_free_slots - flags descriptor slots for reuse
 * @slot: Slot to free
 * Caller must hold &iop_chan->lock while calling this function
 */
static void iop_adma_free_slots(struct iop_adma_desc_slot *slot)
{
	int stride = slot->slots_per_op;

	while (stride--) {
		slot->slots_per_op = 0;
		slot = list_entry(slot->slot_node.next,
				struct iop_adma_desc_slot,
				slot_node);
	}
}

static dma_cookie_t
iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
	struct iop_adma_chan *iop_chan, dma_cookie_t cookie)
{
	struct dma_async_tx_descriptor *tx = &desc->async_tx;

	BUG_ON(tx->cookie < 0);
	if (tx->cookie > 0) {
		cookie = tx->cookie;
		tx->cookie = 0;

		/* call the callback (must not sleep or submit new
		 * operations to this channel)
		 */
		if (tx->callback)
			tx->callback(tx->callback_param);

		dma_descriptor_unmap(tx);
		if (desc->group_head)
			desc->group_head = NULL;
	}

	/* run dependent operations */
	dma_run_dependencies(tx);

	return cookie;
}

static int
iop_adma_clean_slot(struct iop_adma_desc_slot *desc,
	struct iop_adma_chan *iop_chan)
{
	/* the client is allowed to attach dependent operations
	 * until 'ack' is set
	 */
	if (!async_tx_test_ack(&desc->async_tx))
		return 0;

	/* leave the last descriptor in the chain
	 * so we can append to it
	 */
	if (desc->chain_node.next == &iop_chan->chain)
		return 1;

	dev_dbg(iop_chan->device->common.dev,
		"\tfree slot: %d slots_per_op: %d\n",
		desc->idx, desc->slots_per_op);

	list_del(&desc->chain_node);
	iop_adma_free_slots(desc);

	return 0;
}

static void __iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
{
	struct iop_adma_desc_slot *iter, *_iter, *grp_start = NULL;
	dma_cookie_t cookie = 0;
	u32 current_desc = iop_chan_get_current_descriptor(iop_chan);
	int busy = iop_chan_is_busy(iop_chan);
	int seen_current = 0, slot_cnt = 0, slots_per_op = 0;

	dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
	/* free completed slots from the chain starting with
	 * the oldest descriptor
	 */
	list_for_each_entry_safe(iter, _iter, &iop_chan->chain,
					chain_node) {
		pr_debug("\tcookie: %d slot: %d busy: %d "
			"this_desc: %#x next_desc: %#x ack: %d\n",
			iter->async_tx.cookie, iter->idx, busy,
			iter->async_tx.phys, iop_desc_get_next_desc(iter),
			async_tx_test_ack(&iter->async_tx));
		prefetch(_iter);
		prefetch(&_iter->async_tx);

		/* do not advance past the current descriptor loaded into the
		 * hardware channel, subsequent descriptors are either in
		 * process or have not been submitted
		 */
		if (seen_current)
			break;

		/* stop the search if we reach the current descriptor and the
		 * channel is busy, or if it appears that the current descriptor
		 * needs to be re-read (i.e. has been appended to)
		 */
		if (iter->async_tx.phys == current_desc) {
			BUG_ON(seen_current++);
			if (busy || iop_desc_get_next_desc(iter))
				break;
		}

		/* detect the start of a group transaction */
		if (!slot_cnt && !slots_per_op) {
			slot_cnt = iter->slot_cnt;
			slots_per_op = iter->slots_per_op;
			if (slot_cnt <= slots_per_op) {
				slot_cnt = 0;
				slots_per_op = 0;
			}
		}

		if (slot_cnt) {
			pr_debug("\tgroup++\n");
			if (!grp_start)
				grp_start = iter;
			slot_cnt -= slots_per_op;
		}

		/* all the members of a group are complete */
		if (slots_per_op != 0 && slot_cnt == 0) {
			struct iop_adma_desc_slot *grp_iter, *_grp_iter;
			int end_of_chain = 0;
			pr_debug("\tgroup end\n");

			/* collect the total results */
			if (grp_start->xor_check_result) {
				u32 zero_sum_result = 0;
				slot_cnt = grp_start->slot_cnt;
				grp_iter = grp_start;

				list_for_each_entry_from(grp_iter,
					&iop_chan->chain, chain_node) {
					zero_sum_result |=
					    iop_desc_get_zero_result(grp_iter);
					pr_debug("\titer%d result: %d\n",
					    grp_iter->idx, zero_sum_result);
					slot_cnt -= slots_per_op;
					if (slot_cnt == 0)
						break;
				}
				pr_debug("\tgrp_start->xor_check_result: %p\n",
					grp_start->xor_check_result);
				*grp_start->xor_check_result = zero_sum_result;
			}

			/* clean up the group */
			slot_cnt = grp_start->slot_cnt;
			grp_iter = grp_start;
			list_for_each_entry_safe_from(grp_iter, _grp_iter,
				&iop_chan->chain, chain_node) {
				cookie = iop_adma_run_tx_complete_actions(
					grp_iter, iop_chan, cookie);

				slot_cnt -= slots_per_op;
				end_of_chain = iop_adma_clean_slot(grp_iter,
					iop_chan);

				if (slot_cnt == 0 || end_of_chain)
					break;
			}

			/* the group should be complete at this point */
			BUG_ON(slot_cnt);

			slots_per_op = 0;
			grp_start = NULL;
			if (end_of_chain)
				break;
			else
				continue;
		} else if (slots_per_op) /* wait for group completion */
			continue;

		/* write back zero sum results (single descriptor case) */
		if (iter->xor_check_result && iter->async_tx.cookie)
			*iter->xor_check_result =
				iop_desc_get_zero_result(iter);

		cookie = iop_adma_run_tx_complete_actions(
					iter, iop_chan, cookie);

		if (iop_adma_clean_slot(iter, iop_chan))
			break;
	}

	if (cookie > 0) {
		iop_chan->common.completed_cookie = cookie;
		pr_debug("\tcompleted cookie %d\n", cookie);
	}
}

static void
iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
{
	spin_lock_bh(&iop_chan->lock);
	__iop_adma_slot_cleanup(iop_chan);
	spin_unlock_bh(&iop_chan->lock);
}

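/*
 * Deferred descriptor cleanup, scheduled by the end-of-transfer and
 * end-of-chain interrupt handlers so the chain walk runs in softirq
 * context rather than in the hard interrupt handler itself.
 */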
static void iop_adma_tasklet(unsigned long data)
{
	struct iop_adma_chan *iop_chan = (struct iop_adma_chan *) data;

	/* lockdep will flag dependency submissions as potentially
	 * recursive locking, this is not the case as a dependency
	 * submission will never recurse a channel's submit routine.
	 * There are checks in async_tx.c to prevent this.
	 */
	spin_lock_nested(&iop_chan->lock, SINGLE_DEPTH_NESTING);
	__iop_adma_slot_cleanup(iop_chan);
	spin_unlock(&iop_chan->lock);
}

static struct iop_adma_desc_slot *
iop_adma_alloc_slots(struct iop_adma_chan *iop_chan, int num_slots,
		     int slots_per_op)
{
	struct iop_adma_desc_slot *iter, *_iter, *alloc_start = NULL;
	LIST_HEAD(chain);
	int slots_found, retry = 0;

	/* start search from the last allocated descriptor
	 * if a contiguous allocation can not be found start searching
	 * from the beginning of the list
	 */
retry:
	slots_found = 0;
	if (retry == 0)
		iter = iop_chan->last_used;
	else
		iter = list_entry(&iop_chan->all_slots,
				  struct iop_adma_desc_slot,
				  slot_node);

	list_for_each_entry_safe_continue(
		iter, _iter, &iop_chan->all_slots, slot_node) {
		prefetch(_iter);
		prefetch(&_iter->async_tx);
		if (iter->slots_per_op) {
			/* give up after finding the first busy slot
			 * on the second pass through the list
			 */
			if (retry)
				break;

			slots_found = 0;
			continue;
		}

		/* start the allocation if the slot is correctly aligned */
		if (!slots_found++) {
			if (iop_desc_is_aligned(iter, slots_per_op))
				alloc_start = iter;
			else {
				slots_found = 0;
				continue;
			}
		}

		if (slots_found == num_slots) {
			struct iop_adma_desc_slot *alloc_tail = NULL;
			struct iop_adma_desc_slot *last_used = NULL;
			iter = alloc_start;
			while (num_slots) {
				int i;
				dev_dbg(iop_chan->device->common.dev,
					"allocated slot: %d "
					"(desc %p phys: %#x) slots_per_op %d\n",
					iter->idx, iter->hw_desc,
					iter->async_tx.phys, slots_per_op);

				/* pre-ack all but the last descriptor */
				if (num_slots != slots_per_op)
					async_tx_ack(&iter->async_tx);

				list_add_tail(&iter->chain_node, &chain);
				alloc_tail = iter;
				iter->async_tx.cookie = 0;
				iter->slot_cnt = num_slots;
				iter->xor_check_result = NULL;
				for (i = 0; i < slots_per_op; i++) {
					iter->slots_per_op = slots_per_op - i;
					last_used = iter;
					iter = list_entry(iter->slot_node.next,
						struct iop_adma_desc_slot,
						slot_node);
				}
				num_slots -= slots_per_op;
			}
			alloc_tail->group_head = alloc_start;
			alloc_tail->async_tx.cookie = -EBUSY;
			list_splice(&chain, &alloc_tail->tx_list);
			iop_chan->last_used = last_used;
			iop_desc_clear_next_desc(alloc_start);
			iop_desc_clear_next_desc(alloc_tail);
			return alloc_tail;
		}
	}
	if (!retry++)
		goto retry;

	/* perform direct reclaim if the allocation fails */
	__iop_adma_slot_cleanup(iop_chan);

	return NULL;
}

static void iop_adma_check_threshold(struct iop_adma_chan *iop_chan)
{
	dev_dbg(iop_chan->device->common.dev, "pending: %d\n",
		iop_chan->pending);

	if (iop_chan->pending >= IOP_ADMA_THRESHOLD) {
		iop_chan->pending = 0;
		iop_chan_append(iop_chan);
	}
}

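/*
 * Link a prepared software descriptor onto the channel's chain: the
 * slot list is spliced behind the current chain tail, the tail's
 * hardware next-descriptor pointer is fixed up to point at the new
 * group head, and the append is batched via iop_adma_check_threshold().
 */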
static dma_cookie_t
iop_adma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct iop_adma_desc_slot *sw_desc = tx_to_iop_adma_slot(tx);
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(tx->chan);
	struct iop_adma_desc_slot *grp_start, *old_chain_tail;
	int slot_cnt;
	int slots_per_op;
	dma_cookie_t cookie;
	dma_addr_t next_dma;

	grp_start = sw_desc->group_head;
	slot_cnt = grp_start->slot_cnt;
	slots_per_op = grp_start->slots_per_op;

	spin_lock_bh(&iop_chan->lock);
	cookie = dma_cookie_assign(tx);

	old_chain_tail = list_entry(iop_chan->chain.prev,
		struct iop_adma_desc_slot, chain_node);
	list_splice_init(&sw_desc->tx_list,
			 &old_chain_tail->chain_node);

	/* fix up the hardware chain */
	next_dma = grp_start->async_tx.phys;
	iop_desc_set_next_desc(old_chain_tail, next_dma);
	BUG_ON(iop_desc_get_next_desc(old_chain_tail) != next_dma); /* flush */

	/* check for pre-chained descriptors */
	iop_paranoia(iop_desc_get_next_desc(sw_desc));

	/* increment the pending count by the number of slots
	 * memcpy operations have a 1:1 (slot:operation) relation
	 * other operations are heavier and will pop the threshold
	 * more often.
	 */
	iop_chan->pending += slot_cnt;
	iop_adma_check_threshold(iop_chan);
	spin_unlock_bh(&iop_chan->lock);

	dev_dbg(iop_chan->device->common.dev, "%s cookie: %d slot: %d\n",
		__func__, sw_desc->async_tx.cookie, sw_desc->idx);

	return cookie;
}

static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan);
static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan);

/**
 * iop_adma_alloc_chan_resources - returns the number of allocated descriptors
 * @chan: allocate descriptor resources for this channel
 *
 * Note: We keep the slots for 1 operation on iop_chan->chain at all times. To
 * avoid deadlock, via async_xor, num_descs_in_pool must at a minimum be
 * greater than 2x the number of slots needed to satisfy a device->max_xor
 * request.
 */
static int iop_adma_alloc_chan_resources(struct dma_chan *chan)
{
	char *hw_desc;
	int idx;
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
	struct iop_adma_desc_slot *slot = NULL;
	int init = iop_chan->slots_allocated ? 0 : 1;
	struct iop_adma_platform_data *plat_data =
		dev_get_platdata(&iop_chan->device->pdev->dev);
	int num_descs_in_pool = plat_data->pool_size/IOP_ADMA_SLOT_SIZE;

	/* Allocate descriptor slots */
	do {
		idx = iop_chan->slots_allocated;
		if (idx == num_descs_in_pool)
			break;

		slot = kzalloc(sizeof(*slot), GFP_KERNEL);
		if (!slot) {
			printk(KERN_INFO "IOP ADMA Channel only initialized"
				" %d descriptor slots\n", idx);
			break;
		}
		hw_desc = (char *) iop_chan->device->dma_desc_pool_virt;
		slot->hw_desc = (void *) &hw_desc[idx * IOP_ADMA_SLOT_SIZE];

		dma_async_tx_descriptor_init(&slot->async_tx, chan);
		slot->async_tx.tx_submit = iop_adma_tx_submit;
		INIT_LIST_HEAD(&slot->tx_list);
		INIT_LIST_HEAD(&slot->chain_node);
		INIT_LIST_HEAD(&slot->slot_node);
		hw_desc = (char *) iop_chan->device->dma_desc_pool;
		slot->async_tx.phys =
			(dma_addr_t) &hw_desc[idx * IOP_ADMA_SLOT_SIZE];
		slot->idx = idx;

		spin_lock_bh(&iop_chan->lock);
		iop_chan->slots_allocated++;
		list_add_tail(&slot->slot_node, &iop_chan->all_slots);
		spin_unlock_bh(&iop_chan->lock);
	} while (iop_chan->slots_allocated < num_descs_in_pool);

	if (idx && !iop_chan->last_used)
		iop_chan->last_used = list_entry(iop_chan->all_slots.next,
					struct iop_adma_desc_slot,
					slot_node);

	dev_dbg(iop_chan->device->common.dev,
		"allocated %d descriptor slots last_used: %p\n",
		iop_chan->slots_allocated, iop_chan->last_used);

	/* initialize the channel and the chain with a null operation */
	if (init) {
		if (dma_has_cap(DMA_MEMCPY,
			iop_chan->device->common.cap_mask))
			iop_chan_start_null_memcpy(iop_chan);
		else if (dma_has_cap(DMA_XOR,
			iop_chan->device->common.cap_mask))
			iop_chan_start_null_xor(iop_chan);
		else
			BUG();
	}

	return (idx > 0) ? idx : -ENOMEM;
}

static struct dma_async_tx_descriptor *
iop_adma_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
{
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
	struct iop_adma_desc_slot *sw_desc, *grp_start;
	int slot_cnt, slots_per_op;

	dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);

	spin_lock_bh(&iop_chan->lock);
	slot_cnt = iop_chan_interrupt_slot_count(&slots_per_op, iop_chan);
	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
	if (sw_desc) {
		grp_start = sw_desc->group_head;
		iop_desc_init_interrupt(grp_start, iop_chan);
		sw_desc->async_tx.flags = flags;
	}
	spin_unlock_bh(&iop_chan->lock);

	return sw_desc ? &sw_desc->async_tx : NULL;
}

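/*
 * The iop_adma_prep_dma_* routines follow a common pattern: take the
 * channel lock, size the request in descriptor slots, carve the slots
 * out of the pool with iop_adma_alloc_slots(), and program the group
 * head descriptor before returning the transaction to the caller.
 */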
static struct dma_async_tx_descriptor *
iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
			 dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
	struct iop_adma_desc_slot *sw_desc, *grp_start;
	int slot_cnt, slots_per_op;

	if (unlikely(!len))
		return NULL;
	BUG_ON(len > IOP_ADMA_MAX_BYTE_COUNT);

	dev_dbg(iop_chan->device->common.dev, "%s len: %u\n",
		__func__, len);

	spin_lock_bh(&iop_chan->lock);
	slot_cnt = iop_chan_memcpy_slot_count(len, &slots_per_op);
	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
	if (sw_desc) {
		grp_start = sw_desc->group_head;
		iop_desc_init_memcpy(grp_start, flags);
		iop_desc_set_byte_count(grp_start, iop_chan, len);
		iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
		iop_desc_set_memcpy_src_addr(grp_start, dma_src);
		sw_desc->async_tx.flags = flags;
	}
	spin_unlock_bh(&iop_chan->lock);

	return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
		      dma_addr_t *dma_src, unsigned int src_cnt, size_t len,
		      unsigned long flags)
{
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
	struct iop_adma_desc_slot *sw_desc, *grp_start;
	int slot_cnt, slots_per_op;

	if (unlikely(!len))
		return NULL;
	BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);

	dev_dbg(iop_chan->device->common.dev,
		"%s src_cnt: %d len: %u flags: %lx\n",
		__func__, src_cnt, len, flags);

	spin_lock_bh(&iop_chan->lock);
	slot_cnt = iop_chan_xor_slot_count(len, src_cnt, &slots_per_op);
	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
	if (sw_desc) {
		grp_start = sw_desc->group_head;
		iop_desc_init_xor(grp_start, src_cnt, flags);
		iop_desc_set_byte_count(grp_start, iop_chan, len);
		iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
		sw_desc->async_tx.flags = flags;
		while (src_cnt--)
			iop_desc_set_xor_src_addr(grp_start, src_cnt,
						  dma_src[src_cnt]);
	}
	spin_unlock_bh(&iop_chan->lock);

	return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
iop_adma_prep_dma_xor_val(struct dma_chan *chan, dma_addr_t *dma_src,
			  unsigned int src_cnt, size_t len, u32 *result,
			  unsigned long flags)
{
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
	struct iop_adma_desc_slot *sw_desc, *grp_start;
	int slot_cnt, slots_per_op;

	if (unlikely(!len))
		return NULL;

	dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %u\n",
		__func__, src_cnt, len);

	spin_lock_bh(&iop_chan->lock);
	slot_cnt = iop_chan_zero_sum_slot_count(len, src_cnt, &slots_per_op);
	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
	if (sw_desc) {
		grp_start = sw_desc->group_head;
		iop_desc_init_zero_sum(grp_start, src_cnt, flags);
		iop_desc_set_zero_sum_byte_count(grp_start, len);
		grp_start->xor_check_result = result;
		pr_debug("\t%s: grp_start->xor_check_result: %p\n",
			__func__, grp_start->xor_check_result);
		sw_desc->async_tx.flags = flags;
		while (src_cnt--)
			iop_desc_set_zero_sum_src_addr(grp_start, src_cnt,
						       dma_src[src_cnt]);
	}
	spin_unlock_bh(&iop_chan->lock);

	return sw_desc ? &sw_desc->async_tx : NULL;
}

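/*
 * P+Q generation. When a previous P/Q result is being continued
 * (DMA_PREP_CONTINUE, optionally with P disabled), the old P and Q
 * buffers are fed back in as extra sources, which is why the slot
 * count is computed from continue_srcs rather than src_cnt alone.
 */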
static struct dma_async_tx_descriptor *
iop_adma_prep_dma_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
		     unsigned int src_cnt, const unsigned char *scf, size_t len,
		     unsigned long flags)
{
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
	struct iop_adma_desc_slot *sw_desc, *g;
	int slot_cnt, slots_per_op;
	int continue_srcs;

	if (unlikely(!len))
		return NULL;
	BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);

	dev_dbg(iop_chan->device->common.dev,
		"%s src_cnt: %d len: %u flags: %lx\n",
		__func__, src_cnt, len, flags);

	if (dmaf_p_disabled_continue(flags))
		continue_srcs = 1+src_cnt;
	else if (dmaf_continue(flags))
		continue_srcs = 3+src_cnt;
	else
		continue_srcs = 0+src_cnt;

	spin_lock_bh(&iop_chan->lock);
	slot_cnt = iop_chan_pq_slot_count(len, continue_srcs, &slots_per_op);
	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
	if (sw_desc) {
		int i;

		g = sw_desc->group_head;
		iop_desc_set_byte_count(g, iop_chan, len);

		/* even if P is disabled its destination address (bits
		 * [3:0]) must match Q.  It is ok if P points to an
		 * invalid address, it won't be written.
		 */
		if (flags & DMA_PREP_PQ_DISABLE_P)
			dst[0] = dst[1] & 0x7;

		iop_desc_set_pq_addr(g, dst);
		sw_desc->async_tx.flags = flags;
		for (i = 0; i < src_cnt; i++)
			iop_desc_set_pq_src_addr(g, i, src[i], scf[i]);

		/* if we are continuing a previous operation factor in
		 * the old p and q values, see the comment for dma_maxpq
		 * in include/linux/dmaengine.h
		 */
		if (dmaf_p_disabled_continue(flags))
			iop_desc_set_pq_src_addr(g, i++, dst[1], 1);
		else if (dmaf_continue(flags)) {
			iop_desc_set_pq_src_addr(g, i++, dst[0], 0);
			iop_desc_set_pq_src_addr(g, i++, dst[1], 1);
			iop_desc_set_pq_src_addr(g, i++, dst[1], 0);
		}
		iop_desc_init_pq(g, i, flags);
	}
	spin_unlock_bh(&iop_chan->lock);

	return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
iop_adma_prep_dma_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
			 unsigned int src_cnt, const unsigned char *scf,
			 size_t len, enum sum_check_flags *pqres,
			 unsigned long flags)
{
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
	struct iop_adma_desc_slot *sw_desc, *g;
	int slot_cnt, slots_per_op;

	if (unlikely(!len))
		return NULL;
	BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);

	dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %u\n",
		__func__, src_cnt, len);

	spin_lock_bh(&iop_chan->lock);
	slot_cnt = iop_chan_pq_zero_sum_slot_count(len, src_cnt + 2, &slots_per_op);
	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
	if (sw_desc) {
		/* for validate operations p and q are tagged onto the
		 * end of the source list
		 */
		int pq_idx = src_cnt;

		g = sw_desc->group_head;
		iop_desc_init_pq_zero_sum(g, src_cnt+2, flags);
		iop_desc_set_pq_zero_sum_byte_count(g, len);
		g->pq_check_result = pqres;
		pr_debug("\t%s: g->pq_check_result: %p\n",
			__func__, g->pq_check_result);
		sw_desc->async_tx.flags = flags;
		while (src_cnt--)
			iop_desc_set_pq_zero_sum_src_addr(g, src_cnt,
							  src[src_cnt],
							  scf[src_cnt]);
		iop_desc_set_pq_zero_sum_addr(g, pq_idx, src);
	}
	spin_unlock_bh(&iop_chan->lock);

	return sw_desc ? &sw_desc->async_tx : NULL;
}

static void iop_adma_free_chan_resources(struct dma_chan *chan)
{
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
	struct iop_adma_desc_slot *iter, *_iter;
	int in_use_descs = 0;

	iop_adma_slot_cleanup(iop_chan);

	spin_lock_bh(&iop_chan->lock);
	list_for_each_entry_safe(iter, _iter, &iop_chan->chain,
					chain_node) {
		in_use_descs++;
		list_del(&iter->chain_node);
	}
	list_for_each_entry_safe_reverse(
		iter, _iter, &iop_chan->all_slots, slot_node) {
		list_del(&iter->slot_node);
		kfree(iter);
		iop_chan->slots_allocated--;
	}
	iop_chan->last_used = NULL;

	dev_dbg(iop_chan->device->common.dev, "%s slots_allocated %d\n",
		__func__, iop_chan->slots_allocated);
	spin_unlock_bh(&iop_chan->lock);

	/* one is ok since we left it on there on purpose */
	if (in_use_descs > 1)
		printk(KERN_ERR "IOP: Freeing %d in use descriptors!\n",
			in_use_descs - 1);
}

/**
 * iop_adma_status - poll the status of an ADMA transaction
 * @chan: ADMA channel handle
 * @cookie: ADMA transaction identifier
 * @txstate: a holder for the current state of the channel or NULL
 */
static enum dma_status iop_adma_status(struct dma_chan *chan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
	int ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	iop_adma_slot_cleanup(iop_chan);

	return dma_cookie_status(chan, cookie, txstate);
}

static irqreturn_t iop_adma_eot_handler(int irq, void *data)
{
	struct iop_adma_chan *chan = data;

	dev_dbg(chan->device->common.dev, "%s\n", __func__);

	tasklet_schedule(&chan->irq_tasklet);

	iop_adma_device_clear_eot_status(chan);

	return IRQ_HANDLED;
}

static irqreturn_t iop_adma_eoc_handler(int irq, void *data)
{
	struct iop_adma_chan *chan = data;

	dev_dbg(chan->device->common.dev, "%s\n", __func__);

	tasklet_schedule(&chan->irq_tasklet);

	iop_adma_device_clear_eoc_status(chan);

	return IRQ_HANDLED;
}

static irqreturn_t iop_adma_err_handler(int irq, void *data)
{
	struct iop_adma_chan *chan = data;
	unsigned long status = iop_chan_get_status(chan);

	dev_err(chan->device->common.dev,
		"error ( %s%s%s%s%s%s%s)\n",
		iop_is_err_int_parity(status, chan) ? "int_parity " : "",
		iop_is_err_mcu_abort(status, chan) ? "mcu_abort " : "",
		iop_is_err_int_tabort(status, chan) ? "int_tabort " : "",
		iop_is_err_int_mabort(status, chan) ? "int_mabort " : "",
		iop_is_err_pci_tabort(status, chan) ? "pci_tabort " : "",
		iop_is_err_pci_mabort(status, chan) ? "pci_mabort " : "",
		iop_is_err_split_tx(status, chan) ? "split_tx " : "");

	iop_adma_device_clear_err_status(chan);

	BUG();

	return IRQ_HANDLED;
}

static void iop_adma_issue_pending(struct dma_chan *chan)
{
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);

	if (iop_chan->pending) {
		iop_chan->pending = 0;
		iop_chan_append(iop_chan);
	}
}

/*
 * Perform a transaction to verify the HW works.
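 * A single IOP_ADMA_TEST_SIZE byte memcpy is run on the first channel
 * and the destination buffer is compared against the source.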
 */
#define IOP_ADMA_TEST_SIZE 2000

static int iop_adma_memcpy_self_test(struct iop_adma_device *device)
{
	int i;
	void *src, *dest;
	dma_addr_t src_dma, dest_dma;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	struct dma_async_tx_descriptor *tx;
	int err = 0;
	struct iop_adma_chan *iop_chan;

	dev_dbg(device->common.dev, "%s\n", __func__);

	src = kmalloc(IOP_ADMA_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;
	dest = kzalloc(IOP_ADMA_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < IOP_ADMA_TEST_SIZE; i++)
		((u8 *) src)[i] = (u8)i;

	/* Start copy, using first DMA channel */
	dma_chan = container_of(device->common.channels.next,
				struct dma_chan,
				device_node);
	if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	dest_dma = dma_map_single(dma_chan->device->dev, dest,
				IOP_ADMA_TEST_SIZE, DMA_FROM_DEVICE);
	src_dma = dma_map_single(dma_chan->device->dev, src,
				IOP_ADMA_TEST_SIZE, DMA_TO_DEVICE);
	tx = iop_adma_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
				      IOP_ADMA_TEST_SIZE,
				      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	cookie = iop_adma_tx_submit(tx);
	iop_adma_issue_pending(dma_chan);
	msleep(1);

	if (iop_adma_status(dma_chan, cookie, NULL) !=
			DMA_COMPLETE) {
		dev_err(dma_chan->device->dev,
			"Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	iop_chan = to_iop_adma_chan(dma_chan);
	dma_sync_single_for_cpu(&iop_chan->device->pdev->dev, dest_dma,
		IOP_ADMA_TEST_SIZE, DMA_FROM_DEVICE);
	if (memcmp(src, dest, IOP_ADMA_TEST_SIZE)) {
		dev_err(dma_chan->device->dev,
			"Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	iop_adma_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

#define IOP_ADMA_NUM_SRC_TEST 4 /* must be <= 15 */
static int
iop_adma_xor_val_self_test(struct iop_adma_device *device)
{
	int i, src_idx;
	struct page *dest;
	struct page *xor_srcs[IOP_ADMA_NUM_SRC_TEST];
	struct page *zero_sum_srcs[IOP_ADMA_NUM_SRC_TEST + 1];
	dma_addr_t dma_srcs[IOP_ADMA_NUM_SRC_TEST + 1];
	dma_addr_t dest_dma;
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u8 cmp_byte = 0;
	u32 cmp_word;
	u32 zero_sum_result;
	int err = 0;
	struct iop_adma_chan *iop_chan;

	dev_dbg(device->common.dev, "%s\n", __func__);

	for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) {
		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
		if (!xor_srcs[src_idx]) {
			while (src_idx--)
				__free_page(xor_srcs[src_idx]);
			return -ENOMEM;
		}
	}

	dest = alloc_page(GFP_KERNEL);
	if (!dest) {
		while (src_idx--)
			__free_page(xor_srcs[src_idx]);
		return -ENOMEM;
	}

	/* Fill in src buffers */
	for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) {
		u8 *ptr = page_address(xor_srcs[src_idx]);
		for (i = 0; i < PAGE_SIZE; i++)
			ptr[i] = (1 << src_idx);
	}

	for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++)
		cmp_byte ^= (u8) (1 << src_idx);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
			(cmp_byte << 8) | cmp_byte;

	memset(page_address(dest), 0, PAGE_SIZE);

	dma_chan = container_of(device->common.channels.next,
				struct dma_chan,
				device_node);
	if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	/* test xor */
	dest_dma = dma_map_page(dma_chan->device->dev, dest, 0,
				PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++)
		dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
					   0, PAGE_SIZE, DMA_TO_DEVICE);
	tx = iop_adma_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
				   IOP_ADMA_NUM_SRC_TEST, PAGE_SIZE,
				   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	cookie = iop_adma_tx_submit(tx);
	iop_adma_issue_pending(dma_chan);
	msleep(8);

	if (iop_adma_status(dma_chan, cookie, NULL) !=
		DMA_COMPLETE) {
		dev_err(dma_chan->device->dev,
			"Self-test xor timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	iop_chan = to_iop_adma_chan(dma_chan);
	dma_sync_single_for_cpu(&iop_chan->device->pdev->dev, dest_dma,
		PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
		u32 *ptr = page_address(dest);
		if (ptr[i] != cmp_word) {
			dev_err(dma_chan->device->dev,
				"Self-test xor failed compare, disabling\n");
			err = -ENODEV;
			goto free_resources;
		}
	}
	dma_sync_single_for_device(&iop_chan->device->pdev->dev, dest_dma,
		PAGE_SIZE, DMA_TO_DEVICE);

	/* skip zero sum if the capability is not present */
	if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
		goto free_resources;

	/* zero sum the sources with the destination page */
	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++)
		zero_sum_srcs[i] = xor_srcs[i];
	zero_sum_srcs[i] = dest;

	zero_sum_result = 1;

	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++)
		dma_srcs[i] = dma_map_page(dma_chan->device->dev,
					   zero_sum_srcs[i], 0, PAGE_SIZE,
					   DMA_TO_DEVICE);
	tx = iop_adma_prep_dma_xor_val(dma_chan, dma_srcs,
				       IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE,
				       &zero_sum_result,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	cookie = iop_adma_tx_submit(tx);
	iop_adma_issue_pending(dma_chan);
	msleep(8);

	if (iop_adma_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
		dev_err(dma_chan->device->dev,
			"Self-test zero sum timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	if (zero_sum_result != 0) {
		dev_err(dma_chan->device->dev,
			"Self-test zero sum failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	/* test for non-zero parity sum */
	zero_sum_result = 0;
	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++)
		dma_srcs[i] = dma_map_page(dma_chan->device->dev,
					   zero_sum_srcs[i], 0, PAGE_SIZE,
					   DMA_TO_DEVICE);
	tx = iop_adma_prep_dma_xor_val(dma_chan, dma_srcs,
				       IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE,
				       &zero_sum_result,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	cookie = iop_adma_tx_submit(tx);
	iop_adma_issue_pending(dma_chan);
	msleep(8);

	if (iop_adma_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
		dev_err(dma_chan->device->dev,
			"Self-test non-zero sum timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	if (zero_sum_result != 1) {
		dev_err(dma_chan->device->dev,
			"Self-test non-zero sum failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
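	/* give back the descriptor slots taken for the self test */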
	iop_adma_free_chan_resources(dma_chan);
out:
	src_idx = IOP_ADMA_NUM_SRC_TEST;
	while (src_idx--)
		__free_page(xor_srcs[src_idx]);
	__free_page(dest);
	return err;
}

#ifdef CONFIG_RAID6_PQ
static int
iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device)
{
	/* combined sources, software pq results, and extra hw pq results */
	struct page *pq[IOP_ADMA_NUM_SRC_TEST+2+2];
	/* ptr to the extra hw pq buffers defined above */
	struct page **pq_hw = &pq[IOP_ADMA_NUM_SRC_TEST+2];
	/* address conversion buffers (dma_map / page_address) */
	void *pq_sw[IOP_ADMA_NUM_SRC_TEST+2];
	dma_addr_t pq_src[IOP_ADMA_NUM_SRC_TEST+2];
	dma_addr_t *pq_dest = &pq_src[IOP_ADMA_NUM_SRC_TEST];

	int i;
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u32 zero_sum_result;
	int err = 0;
	struct device *dev;

	dev_dbg(device->common.dev, "%s\n", __func__);

	for (i = 0; i < ARRAY_SIZE(pq); i++) {
		pq[i] = alloc_page(GFP_KERNEL);
		if (!pq[i]) {
			while (i--)
				__free_page(pq[i]);
			return -ENOMEM;
		}
	}

	/* Fill in src buffers */
	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++) {
		pq_sw[i] = page_address(pq[i]);
		memset(pq_sw[i], 0x11111111 * (1<<i), PAGE_SIZE);
	}
	pq_sw[i] = page_address(pq[i]);
	pq_sw[i+1] = page_address(pq[i+1]);

	dma_chan = container_of(device->common.channels.next,
				struct dma_chan,
				device_node);
	if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	dev = dma_chan->device->dev;

	/* initialize the dests */
	memset(page_address(pq_hw[0]), 0 , PAGE_SIZE);
	memset(page_address(pq_hw[1]), 0 , PAGE_SIZE);

	/* test pq */
	pq_dest[0] = dma_map_page(dev, pq_hw[0], 0, PAGE_SIZE, DMA_FROM_DEVICE);
	pq_dest[1] = dma_map_page(dev, pq_hw[1], 0, PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++)
		pq_src[i] = dma_map_page(dev, pq[i], 0, PAGE_SIZE,
					 DMA_TO_DEVICE);

	tx = iop_adma_prep_dma_pq(dma_chan, pq_dest, pq_src,
				  IOP_ADMA_NUM_SRC_TEST, (u8 *)raid6_gfexp,
				  PAGE_SIZE,
				  DMA_PREP_INTERRUPT |
				  DMA_CTRL_ACK);

	cookie = iop_adma_tx_submit(tx);
	iop_adma_issue_pending(dma_chan);
	msleep(8);

	if (iop_adma_status(dma_chan, cookie, NULL) !=
		DMA_COMPLETE) {
		dev_err(dev, "Self-test pq timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	raid6_call.gen_syndrome(IOP_ADMA_NUM_SRC_TEST+2, PAGE_SIZE, pq_sw);

	if (memcmp(pq_sw[IOP_ADMA_NUM_SRC_TEST],
		   page_address(pq_hw[0]), PAGE_SIZE) != 0) {
		dev_err(dev, "Self-test p failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}
	if (memcmp(pq_sw[IOP_ADMA_NUM_SRC_TEST+1],
		   page_address(pq_hw[1]), PAGE_SIZE) != 0) {
		dev_err(dev, "Self-test q failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	/* test correct zero sum using the software generated pq values */
	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 2; i++)
		pq_src[i] = dma_map_page(dev, pq[i], 0, PAGE_SIZE,
					 DMA_TO_DEVICE);

	zero_sum_result = ~0;
	tx = iop_adma_prep_dma_pq_val(dma_chan, &pq_src[IOP_ADMA_NUM_SRC_TEST],
				      pq_src, IOP_ADMA_NUM_SRC_TEST,
				      raid6_gfexp, PAGE_SIZE, &zero_sum_result,
				      DMA_PREP_INTERRUPT|DMA_CTRL_ACK);

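	/* validate the software-generated P/Q; a matching syndrome should
	 * leave zero_sum_result clear
	 */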
	cookie = iop_adma_tx_submit(tx);
	iop_adma_issue_pending(dma_chan);
	msleep(8);

	if (iop_adma_status(dma_chan, cookie, NULL) !=
		DMA_COMPLETE) {
		dev_err(dev, "Self-test pq-zero-sum timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	if (zero_sum_result != 0) {
		dev_err(dev, "Self-test pq-zero-sum failed to validate: %x\n",
			zero_sum_result);
		err = -ENODEV;
		goto free_resources;
	}

	/* test incorrect zero sum */
	i = IOP_ADMA_NUM_SRC_TEST;
	memset(pq_sw[i] + 100, 0, 100);
	memset(pq_sw[i+1] + 200, 0, 200);
	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 2; i++)
		pq_src[i] = dma_map_page(dev, pq[i], 0, PAGE_SIZE,
					 DMA_TO_DEVICE);

	zero_sum_result = 0;
	tx = iop_adma_prep_dma_pq_val(dma_chan, &pq_src[IOP_ADMA_NUM_SRC_TEST],
				      pq_src, IOP_ADMA_NUM_SRC_TEST,
				      raid6_gfexp, PAGE_SIZE, &zero_sum_result,
				      DMA_PREP_INTERRUPT|DMA_CTRL_ACK);

	cookie = iop_adma_tx_submit(tx);
	iop_adma_issue_pending(dma_chan);
	msleep(8);

	if (iop_adma_status(dma_chan, cookie, NULL) !=
		DMA_COMPLETE) {
		dev_err(dev, "Self-test !pq-zero-sum timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	if (zero_sum_result != (SUM_CHECK_P_RESULT | SUM_CHECK_Q_RESULT)) {
		dev_err(dev, "Self-test !pq-zero-sum failed to validate: %x\n",
			zero_sum_result);
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	iop_adma_free_chan_resources(dma_chan);
out:
	i = ARRAY_SIZE(pq);
	while (i--)
		__free_page(pq[i]);
	return err;
}
#endif

static int iop_adma_remove(struct platform_device *dev)
{
	struct iop_adma_device *device = platform_get_drvdata(dev);
	struct dma_chan *chan, *_chan;
	struct iop_adma_chan *iop_chan;
	struct iop_adma_platform_data *plat_data = dev_get_platdata(&dev->dev);

	dma_async_device_unregister(&device->common);

	dma_free_coherent(&dev->dev, plat_data->pool_size,
			device->dma_desc_pool_virt, device->dma_desc_pool);

	list_for_each_entry_safe(chan, _chan, &device->common.channels,
				device_node) {
		iop_chan = to_iop_adma_chan(chan);
		list_del(&chan->device_node);
		kfree(iop_chan);
	}
	kfree(device);

	return 0;
}

static int iop_adma_probe(struct platform_device *pdev)
{
	struct resource *res;
	int ret = 0, i;
	struct iop_adma_device *adev;
	struct iop_adma_chan *iop_chan;
	struct dma_device *dma_dev;
	struct iop_adma_platform_data *plat_data = dev_get_platdata(&pdev->dev);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	if (!devm_request_mem_region(&pdev->dev, res->start,
				resource_size(res), pdev->name))
		return -EBUSY;

	adev = kzalloc(sizeof(*adev), GFP_KERNEL);
	if (!adev)
		return -ENOMEM;
	dma_dev = &adev->common;

	/* allocate coherent memory for hardware descriptors
	 * note: writecombine gives slightly better performance, but
	 * requires that we explicitly flush the writes
	 */
	adev->dma_desc_pool_virt = dma_alloc_writecombine(&pdev->dev,
							  plat_data->pool_size,
							  &adev->dma_desc_pool,
							  GFP_KERNEL);
	if (!adev->dma_desc_pool_virt) {
		ret = -ENOMEM;
		goto err_free_adev;
	}

	dev_dbg(&pdev->dev, "%s: allocated descriptor pool virt %p phys %p\n",
		__func__, adev->dma_desc_pool_virt,
		(void *) adev->dma_desc_pool);

	adev->id = plat_data->hw_id;

	/* discover transaction capabilities from the platform data */
	dma_dev->cap_mask = plat_data->cap_mask;

	adev->pdev = pdev;
	platform_set_drvdata(pdev, adev);

	INIT_LIST_HEAD(&dma_dev->channels);

	/* set base routines */
	dma_dev->device_alloc_chan_resources = iop_adma_alloc_chan_resources;
	dma_dev->device_free_chan_resources = iop_adma_free_chan_resources;
	dma_dev->device_tx_status = iop_adma_status;
	dma_dev->device_issue_pending = iop_adma_issue_pending;
	dma_dev->dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memcpy = iop_adma_prep_dma_memcpy;
	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		dma_dev->max_xor = iop_adma_get_max_xor();
		dma_dev->device_prep_dma_xor = iop_adma_prep_dma_xor;
	}
	if (dma_has_cap(DMA_XOR_VAL, dma_dev->cap_mask))
		dma_dev->device_prep_dma_xor_val =
			iop_adma_prep_dma_xor_val;
	if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
		dma_set_maxpq(dma_dev, iop_adma_get_max_pq(), 0);
		dma_dev->device_prep_dma_pq = iop_adma_prep_dma_pq;
	}
	if (dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask))
		dma_dev->device_prep_dma_pq_val =
			iop_adma_prep_dma_pq_val;
	if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
		dma_dev->device_prep_dma_interrupt =
			iop_adma_prep_dma_interrupt;

	iop_chan = kzalloc(sizeof(*iop_chan), GFP_KERNEL);
	if (!iop_chan) {
		ret = -ENOMEM;
		goto err_free_dma;
	}
	iop_chan->device = adev;

	iop_chan->mmr_base = devm_ioremap(&pdev->dev, res->start,
					resource_size(res));
	if (!iop_chan->mmr_base) {
		ret = -ENOMEM;
		goto err_free_iop_chan;
	}
	tasklet_init(&iop_chan->irq_tasklet, iop_adma_tasklet, (unsigned long)
		iop_chan);

	/* clear errors before enabling interrupts */
	iop_adma_device_clear_err_status(iop_chan);

	for (i = 0; i < 3; i++) {
		irq_handler_t handler[] = { iop_adma_eot_handler,
					iop_adma_eoc_handler,
					iop_adma_err_handler };
		int irq = platform_get_irq(pdev, i);
		if (irq < 0) {
			ret = -ENXIO;
			goto err_free_iop_chan;
		} else {
			ret = devm_request_irq(&pdev->dev, irq,
					handler[i], 0, pdev->name, iop_chan);
			if (ret)
				goto err_free_iop_chan;
		}
	}

	spin_lock_init(&iop_chan->lock);
	INIT_LIST_HEAD(&iop_chan->chain);
	INIT_LIST_HEAD(&iop_chan->all_slots);
	iop_chan->common.device = dma_dev;
	dma_cookie_init(&iop_chan->common);
	list_add_tail(&iop_chan->common.device_node, &dma_dev->channels);

	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
		ret = iop_adma_memcpy_self_test(adev);
		dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
		if (ret)
			goto err_free_iop_chan;
	}

	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		ret = iop_adma_xor_val_self_test(adev);
		dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
		if (ret)
			goto err_free_iop_chan;
	}

	if (dma_has_cap(DMA_PQ, dma_dev->cap_mask) &&
	    dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask)) {
		#ifdef CONFIG_RAID6_PQ
		ret = iop_adma_pq_zero_sum_self_test(adev);
		dev_dbg(&pdev->dev, "pq self test returned %d\n", ret);
		#else
		/* can not test raid6, so do not publish capability */
		dma_cap_clear(DMA_PQ, dma_dev->cap_mask);
		dma_cap_clear(DMA_PQ_VAL, dma_dev->cap_mask);
		ret = 0;
		#endif
		if (ret)
			goto err_free_iop_chan;
	}

	dev_info(&pdev->dev, "Intel(R) IOP: ( %s%s%s%s%s%s)\n",
		 dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "pq " : "",
		 dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask) ? "pq_val " : "",
		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
		 dma_has_cap(DMA_XOR_VAL, dma_dev->cap_mask) ? "xor_val " : "",
		 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
		 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");

	dma_async_device_register(dma_dev);
	goto out;

 err_free_iop_chan:
	kfree(iop_chan);
 err_free_dma:
	dma_free_coherent(&adev->pdev->dev, plat_data->pool_size,
			adev->dma_desc_pool_virt, adev->dma_desc_pool);
 err_free_adev:
	kfree(adev);
 out:
	return ret;
}

static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan)
{
	struct iop_adma_desc_slot *sw_desc, *grp_start;
	dma_cookie_t cookie;
	int slot_cnt, slots_per_op;

	dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);

	spin_lock_bh(&iop_chan->lock);
	slot_cnt = iop_chan_memcpy_slot_count(0, &slots_per_op);
	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
	if (sw_desc) {
		grp_start = sw_desc->group_head;

		list_splice_init(&sw_desc->tx_list, &iop_chan->chain);
		async_tx_ack(&sw_desc->async_tx);
		iop_desc_init_memcpy(grp_start, 0);
		iop_desc_set_byte_count(grp_start, iop_chan, 0);
		iop_desc_set_dest_addr(grp_start, iop_chan, 0);
		iop_desc_set_memcpy_src_addr(grp_start, 0);

		cookie = dma_cookie_assign(&sw_desc->async_tx);

		/* initialize the completed cookie to be less than
		 * the most recently used cookie
		 */
		iop_chan->common.completed_cookie = cookie - 1;

		/* channel should not be busy */
		BUG_ON(iop_chan_is_busy(iop_chan));

		/* clear any prior error-status bits */
		iop_adma_device_clear_err_status(iop_chan);

		/* disable operation */
		iop_chan_disable(iop_chan);

		/* set the descriptor address */
		iop_chan_set_next_descriptor(iop_chan, sw_desc->async_tx.phys);

		/* 1/ don't add pre-chained descriptors
		 * 2/ dummy read to flush next_desc write
		 */
		BUG_ON(iop_desc_get_next_desc(sw_desc));

		/* run the descriptor */
		iop_chan_enable(iop_chan);
	} else
		dev_err(iop_chan->device->common.dev,
			"failed to allocate null descriptor\n");
	spin_unlock_bh(&iop_chan->lock);
}

static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan)
{
	struct iop_adma_desc_slot *sw_desc, *grp_start;
	dma_cookie_t cookie;
	int slot_cnt, slots_per_op;

	dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);

	spin_lock_bh(&iop_chan->lock);
	slot_cnt = iop_chan_xor_slot_count(0, 2, &slots_per_op);
	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
	if (sw_desc) {
		grp_start = sw_desc->group_head;
		list_splice_init(&sw_desc->tx_list, &iop_chan->chain);
		async_tx_ack(&sw_desc->async_tx);
		iop_desc_init_null_xor(grp_start, 2, 0);
		iop_desc_set_byte_count(grp_start, iop_chan, 0);
		iop_desc_set_dest_addr(grp_start, iop_chan, 0);
		iop_desc_set_xor_src_addr(grp_start, 0, 0);
		iop_desc_set_xor_src_addr(grp_start, 1, 0);

		cookie = dma_cookie_assign(&sw_desc->async_tx);

		/* initialize the completed cookie to be less than
		 * the most recently used cookie
		 */
		iop_chan->common.completed_cookie = cookie - 1;

		/* channel should not be busy */
		BUG_ON(iop_chan_is_busy(iop_chan));

		/* clear any prior error-status bits */
		iop_adma_device_clear_err_status(iop_chan);

		/* disable operation */
		iop_chan_disable(iop_chan);

		/* set the descriptor address */
		iop_chan_set_next_descriptor(iop_chan, sw_desc->async_tx.phys);

		/* 1/ don't add pre-chained descriptors
		 * 2/ dummy read to flush next_desc write
		 */
		BUG_ON(iop_desc_get_next_desc(sw_desc));

		/* run the descriptor */
		iop_chan_enable(iop_chan);
	} else
		dev_err(iop_chan->device->common.dev,
			"failed to allocate null descriptor\n");
	spin_unlock_bh(&iop_chan->lock);
}

static struct platform_driver iop_adma_driver = {
	.probe		= iop_adma_probe,
	.remove		= iop_adma_remove,
	.driver		= {
		.name	= "iop-adma",
	},
};

module_platform_driver(iop_adma_driver);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("IOP ADMA Engine Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:iop-adma");