/*
 *
 * Linux MegaRAID device driver
 *
 * Copyright (c) 2003-2004  LSI Logic Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * FILE		: megaraid_mm.c
 * Version	: v2.20.2.7 (Jul 16 2006)
 *
 * Common management module
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include "megaraid_mm.h"


// Entry points for char node driver
static DEFINE_MUTEX(mraid_mm_mutex);
static int mraid_mm_open(struct inode *, struct file *);
static long mraid_mm_unlocked_ioctl(struct file *, uint, unsigned long);


// Routines to convert to and from the old (MIMD) format
static int mimd_to_kioc(mimd_t __user *, mraid_mmadp_t *, uioc_t *);
static int kioc_to_mimd(uioc_t *, mimd_t __user *);


// Helper functions
static int handle_drvrcmd(void __user *, uint8_t, int *);
static int lld_ioctl(mraid_mmadp_t *, uioc_t *);
static void ioctl_done(uioc_t *);
static void lld_timedout(unsigned long);
static void hinfo_to_cinfo(mraid_hba_info_t *, mcontroller_t *);
static mraid_mmadp_t *mraid_mm_get_adapter(mimd_t __user *, int *);
static uioc_t *mraid_mm_alloc_kioc(mraid_mmadp_t *);
static void mraid_mm_dealloc_kioc(mraid_mmadp_t *, uioc_t *);
static int mraid_mm_attach_buf(mraid_mmadp_t *, uioc_t *, int);
static int mraid_mm_setup_dma_pools(mraid_mmadp_t *);
static void mraid_mm_free_adp_resources(mraid_mmadp_t *);
static void mraid_mm_teardown_dma_pools(mraid_mmadp_t *);

#ifdef CONFIG_COMPAT
static long mraid_mm_compat_ioctl(struct file *, unsigned int, unsigned long);
#endif

MODULE_AUTHOR("LSI Logic Corporation");
MODULE_DESCRIPTION("LSI Logic Management Module");
MODULE_LICENSE("GPL");
MODULE_VERSION(LSI_COMMON_MOD_VERSION);

static int dbglevel = CL_ANN;
module_param_named(dlevel, dbglevel, int, 0);
MODULE_PARM_DESC(dlevel, "Debug level (default=0)");

EXPORT_SYMBOL(mraid_mm_register_adp);
EXPORT_SYMBOL(mraid_mm_unregister_adp);
EXPORT_SYMBOL(mraid_mm_adapter_app_handle);

static uint32_t drvr_ver	= 0x02200207;

static int adapters_count_g;
static struct list_head adapters_list_g;

static wait_queue_head_t wait_q;

static const struct file_operations lsi_fops = {
	.open	= mraid_mm_open,
	.unlocked_ioctl = mraid_mm_unlocked_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = mraid_mm_compat_ioctl,
#endif
	.owner	= THIS_MODULE,
	.llseek = noop_llseek,
};

static struct miscdevice megaraid_mm_dev = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= "megadev0",
	.fops	= &lsi_fops,
};
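
/*
 * Illustrative userspace sketch (not part of the driver): how a management
 * application might query the adapter count through the megadev0 node. The
 * request code MEGAIOC_CMD and the exact mimd_t field layout shown here are
 * assumptions for illustration only; real MIMD applications use the vendor
 * supplied headers.
 *
 *	int fd = open("/dev/megadev0", O_RDONLY);	// needs CAP_SYS_ADMIN
 *	mimd_t mimd = {0};
 *	uint32_t nadap = 0;
 *
 *	mimd.ui.fcs.opcode    = 0x82;			// driver-private opcode
 *	mimd.ui.fcs.subopcode = MEGAIOC_QNADAP;		// query adapter count
 *	mimd.data             = (caddr_t)&nadap;	// result copied here
 *
 *	if (fd >= 0 && ioctl(fd, MEGAIOC_CMD, &mimd) >= 0)
 *		printf("%u adapter(s)\n", nadap);
 */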

/**
 * mraid_mm_open - open routine for char node interface
 * @inode	: unused
 * @filep	: unused
 *
 * Allow ioctl operations by apps only if they have superuser privilege.
 */
static int
mraid_mm_open(struct inode *inode, struct file *filep)
{
	/*
	 * Only allow superuser to access private ioctl interface
	 */
	if (!capable(CAP_SYS_ADMIN)) return (-EACCES);

	return 0;
}

/**
 * mraid_mm_ioctl - module entry-point for ioctls
 * @filep	: file operations pointer (ignored)
 * @cmd	: ioctl command
 * @arg	: user ioctl packet
 */
static int
mraid_mm_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
	uioc_t		*kioc;
	char		signature[EXT_IOCTL_SIGN_SZ]	= {0};
	int		rval;
	mraid_mmadp_t	*adp;
	uint8_t		old_ioctl;
	int		drvrcmd_rval;
	void __user *argp = (void __user *)arg;

	/*
	 * Only commands with the MEGAIOC_MAGIC type or USCSICMD are accepted
	 * through this interface; MIMD applications may still issue other
	 * command codes.
	 */
	if ((_IOC_TYPE(cmd) != MEGAIOC_MAGIC) && (cmd != USCSICMD)) {
		return (-EINVAL);
	}

	/*
	 * Look for signature to see if this is the new or old ioctl format.
	 */
	if (copy_from_user(signature, argp, EXT_IOCTL_SIGN_SZ)) {
		con_log(CL_ANN, (KERN_WARNING
			"megaraid cmm: copy from usr addr failed\n"));
		return (-EFAULT);
	}

	if (memcmp(signature, EXT_IOCTL_SIGN, EXT_IOCTL_SIGN_SZ) == 0)
		old_ioctl = 0;
	else
		old_ioctl = 1;

	/*
	 * At present, we don't support the new ioctl packet
	 */
	if (!old_ioctl)
		return (-EINVAL);

	/*
	 * If it is a driver ioctl (as opposed to fw ioctls), then we can
	 * handle the command locally. rval > 0 means it is not a drvr cmd
	 */
	rval = handle_drvrcmd(argp, old_ioctl, &drvrcmd_rval);

	if (rval < 0)
		return rval;
	else if (rval == 0)
		return drvrcmd_rval;

	rval = 0;
	if ((adp = mraid_mm_get_adapter(argp, &rval)) == NULL) {
		return rval;
	}

	/*
	 * Check if adapter can accept ioctl. We may have marked it offline
	 * if any previous kioc had timedout on this controller.
	 */
	if (!adp->quiescent) {
		con_log(CL_ANN, (KERN_WARNING
			"megaraid cmm: controller cannot accept cmds due to "
			"earlier errors\n"));
		return -EFAULT;
	}

	/*
	 * The following call will block till a kioc is available
	 */
	kioc = mraid_mm_alloc_kioc(adp);
	if (!kioc)
		return -ENXIO;

	/*
	 * User sent the old mimd_t ioctl packet. Convert it to uioc_t.
	 */
	if ((rval = mimd_to_kioc(argp, adp, kioc))) {
		mraid_mm_dealloc_kioc(adp, kioc);
		return rval;
	}

	kioc->done = ioctl_done;

	/*
	 * Issue the IOCTL to the low level driver. After the IOCTL completes
	 * release the kioc if and only if it was _not_ timedout. If it was
	 * timedout, that means that resources are still with low level driver.
	 */
	if ((rval = lld_ioctl(adp, kioc))) {

		if (!kioc->timedout)
			mraid_mm_dealloc_kioc(adp, kioc);

		return rval;
	}

	/*
	 * Convert the kioc back to user space
	 */
	rval = kioc_to_mimd(kioc, argp);

	/*
	 * Return the kioc to free pool
	 */
	mraid_mm_dealloc_kioc(adp, kioc);

	return rval;
}

static long
mraid_mm_unlocked_ioctl(struct file *filep, unsigned int cmd,
			unsigned long arg)
{
	int err;

	/* inconsistent: mraid_mm_compat_ioctl() calls in without this mutex */
	mutex_lock(&mraid_mm_mutex);
	err = mraid_mm_ioctl(filep, cmd, arg);
	mutex_unlock(&mraid_mm_mutex);

	return err;
}

/**
 * mraid_mm_get_adapter - Returns corresponding adapter for the mimd packet
 * @umimd	: User space mimd_t ioctl packet
 * @rval	: returned success/error status
 *
 * The function return value is a pointer to the located adapter, or NULL
 * with @rval set to an error code if it could not be found.
 */
static mraid_mmadp_t *
mraid_mm_get_adapter(mimd_t __user *umimd, int *rval)
{
	mraid_mmadp_t	*adapter;
	mimd_t		mimd;
	uint32_t	adapno;
	int		iterator;


	if (copy_from_user(&mimd, umimd, sizeof(mimd_t))) {
		*rval = -EFAULT;
		return NULL;
	}

	adapno = GETADAP(mimd.ui.fcs.adapno);

	if (adapno >= adapters_count_g) {
		*rval = -ENODEV;
		return NULL;
	}

	adapter = NULL;
	iterator = 0;

	list_for_each_entry(adapter, &adapters_list_g, list) {
		if (iterator++ == adapno) break;
	}

	if (!adapter) {
		*rval = -ENODEV;
		return NULL;
	}

	return adapter;
}

/**
 * handle_drvrcmd - Checks if the opcode is a driver cmd and if it is, handles it.
 * @arg		: packet sent by the user app
 * @old_ioctl	: mimd if 1; uioc otherwise
 * @rval	: pointer for command's returned value (not function status)
 */
static int
handle_drvrcmd(void __user *arg, uint8_t old_ioctl, int *rval)
{
	mimd_t		__user *umimd;
	mimd_t		kmimd;
	uint8_t		opcode;
	uint8_t		subopcode;

	if (old_ioctl)
		goto old_packet;
	else
		goto new_packet;

new_packet:
	return (-ENOTSUPP);

old_packet:
	*rval = 0;
	umimd = arg;

	if (copy_from_user(&kmimd, umimd, sizeof(mimd_t)))
		return (-EFAULT);

	opcode		= kmimd.ui.fcs.opcode;
	subopcode	= kmimd.ui.fcs.subopcode;

	/*
	 * If the opcode is 0x82 and the subopcode is either GET_DRVRVER or
	 * GET_NUMADP, then we can handle. Otherwise we should return 1 to
	 * indicate that we cannot handle this.
	 */
	if (opcode != 0x82)
		return 1;

	switch (subopcode) {

	case MEGAIOC_QDRVRVER:

		if (copy_to_user(kmimd.data, &drvr_ver, sizeof(uint32_t)))
			return (-EFAULT);

		return 0;

	case MEGAIOC_QNADAP:

		*rval = adapters_count_g;

		if (copy_to_user(kmimd.data, &adapters_count_g,
				sizeof(uint32_t)))
			return (-EFAULT);

		return 0;

	default:
		/* cannot handle */
		return 1;
	}

	return 0;
}

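/*
 * Quick reference for the old MIMD opcodes handled below (summarized from
 * the code, not from a specification):
 *
 *   0x80 - mailbox command; data buffer at mimd.data, transfer length is
 *          the larger of mimd.inlen and mimd.outlen
 *   0x81 - mailbox command; data buffer at mimd.ui.fcs.buffer, transfer
 *          length in mimd.ui.fcs.length
 *   0x82 - driver-private queries: driver version and adapter count are
 *          answered directly in handle_drvrcmd(); MEGAIOC_QADAPINFO is
 *          converted to a GET_ADAP_INFO kioc for the low level driver
 */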

/**
 * mimd_to_kioc	- Converter from old to new ioctl format
 * @umimd	: user space old MIMD IOCTL
 * @adp		: adapter softstate
 * @kioc	: kernel space new format IOCTL
 *
 * Routine to convert MIMD interface IOCTL to new interface IOCTL packet. The
 * new packet is in kernel space so that driver can perform operations on it
 * freely.
 */

static int
mimd_to_kioc(mimd_t __user *umimd, mraid_mmadp_t *adp, uioc_t *kioc)
{
	mbox64_t		*mbox64;
	mbox_t			*mbox;
	mraid_passthru_t	*pthru32;
	uint32_t		adapno;
	uint8_t			opcode;
	uint8_t			subopcode;
	mimd_t			mimd;

	if (copy_from_user(&mimd, umimd, sizeof(mimd_t)))
		return (-EFAULT);

	/*
	 * Applications are not allowed to send extd pthru
	 */
	if ((mimd.mbox[0] == MBOXCMD_PASSTHRU64) ||
			(mimd.mbox[0] == MBOXCMD_EXTPTHRU))
		return (-EINVAL);

	opcode		= mimd.ui.fcs.opcode;
	subopcode	= mimd.ui.fcs.subopcode;
	adapno		= GETADAP(mimd.ui.fcs.adapno);

	if (adapno >= adapters_count_g)
		return (-ENODEV);

	kioc->adapno	= adapno;
	kioc->mb_type	= MBOX_LEGACY;
	kioc->app_type	= APPTYPE_MIMD;

	switch (opcode) {

	case 0x82:

		if (subopcode == MEGAIOC_QADAPINFO) {

			kioc->opcode	= GET_ADAP_INFO;
			kioc->data_dir	= UIOC_RD;
			kioc->xferlen	= sizeof(mraid_hba_info_t);

			if (mraid_mm_attach_buf(adp, kioc, kioc->xferlen))
				return (-ENOMEM);
		}
		else {
			con_log(CL_ANN, (KERN_WARNING
					"megaraid cmm: Invalid subop\n"));
			return (-EINVAL);
		}

		break;

	case 0x81:

		kioc->opcode		= MBOX_CMD;
		kioc->xferlen		= mimd.ui.fcs.length;
		kioc->user_data_len	= kioc->xferlen;
		kioc->user_data		= mimd.ui.fcs.buffer;

		if (mraid_mm_attach_buf(adp, kioc, kioc->xferlen))
			return (-ENOMEM);

		if (mimd.outlen) kioc->data_dir  = UIOC_RD;
		if (mimd.inlen) kioc->data_dir |= UIOC_WR;

		break;

	case 0x80:

		kioc->opcode		= MBOX_CMD;
		kioc->xferlen		= (mimd.outlen > mimd.inlen) ?
						mimd.outlen : mimd.inlen;
		kioc->user_data_len	= kioc->xferlen;
		kioc->user_data		= mimd.data;

		if (mraid_mm_attach_buf(adp, kioc, kioc->xferlen))
			return (-ENOMEM);

		if (mimd.outlen) kioc->data_dir  = UIOC_RD;
		if (mimd.inlen) kioc->data_dir |= UIOC_WR;

		break;

	default:
		return (-EINVAL);
	}

	/*
	 * If driver command, nothing else to do
	 */
	if (opcode == 0x82)
		return 0;

	/*
	 * This is a mailbox cmd; copy the mailbox from mimd
	 */
	mbox64	= (mbox64_t *)((unsigned long)kioc->cmdbuf);
	mbox	= &mbox64->mbox32;
	memcpy(mbox, mimd.mbox, 14);

	if (mbox->cmd != MBOXCMD_PASSTHRU) {	// regular DCMD

		mbox->xferaddr	= (uint32_t)kioc->buf_paddr;

		if (kioc->data_dir & UIOC_WR) {
			if (copy_from_user(kioc->buf_vaddr, kioc->user_data,
							kioc->xferlen)) {
				return (-EFAULT);
			}
		}

		return 0;
	}

	/*
	 * This is a regular 32-bit pthru cmd; the mailbox's xferaddr is made
	 * to point at the kioc's pre-allocated passthru structure, and the
	 * passthru's dataxferaddr in turn points at the attached data buffer.
	 */
	pthru32			= kioc->pthru32;
	kioc->user_pthru	= &umimd->pthru;
	mbox->xferaddr		= (uint32_t)kioc->pthru32_h;

	if (copy_from_user(pthru32, kioc->user_pthru,
			sizeof(mraid_passthru_t))) {
		return (-EFAULT);
	}

	pthru32->dataxferaddr	= kioc->buf_paddr;
	if (kioc->data_dir & UIOC_WR) {
		if (pthru32->dataxferlen > kioc->xferlen)
			return -EINVAL;
		if (copy_from_user(kioc->buf_vaddr, kioc->user_data,
						pthru32->dataxferlen)) {
			return (-EFAULT);
		}
	}

	return 0;
}

/**
 * mraid_mm_attach_buf - Attach a free dma buffer for required size
 * @adp		: Adapter softstate
 * @kioc	: kioc that the buffer needs to be attached to
 * @xferlen	: required length for buffer
 *
 * First we search for a pool with the smallest buffer that is >= @xferlen.
 * If that pool has no free buffer, we will try for the next bigger size. If
 * none is available, we will try to allocate the smallest buffer that is >=
 * @xferlen and attach it to the kioc.
 */
static int
mraid_mm_attach_buf(mraid_mmadp_t *adp, uioc_t *kioc, int xferlen)
{
	mm_dmapool_t	*pool;
	int		right_pool = -1;
	unsigned long	flags;
	int		i;

	kioc->pool_index	= -1;
	kioc->buf_vaddr		= NULL;
	kioc->buf_paddr		= 0;
	kioc->free_buf		= 0;

	/*
	 * We need xferlen amount of memory. See if we can get it from our
	 * dma pools. If we don't get exact size, we will try bigger buffer
	 */

	for (i = 0; i < MAX_DMA_POOLS; i++) {

		pool = &adp->dma_pool_list[i];

		if (xferlen > pool->buf_size)
			continue;

		if (right_pool == -1)
			right_pool = i;

		spin_lock_irqsave(&pool->lock, flags);

		if (!pool->in_use) {

			pool->in_use		= 1;
			kioc->pool_index	= i;
			kioc->buf_vaddr		= pool->vaddr;
			kioc->buf_paddr		= pool->paddr;

			spin_unlock_irqrestore(&pool->lock, flags);
			return 0;
		}
		else {
			spin_unlock_irqrestore(&pool->lock, flags);
			continue;
		}
	}

	/*
	 * If xferlen doesn't match any of our pools, return error
	 */
	if (right_pool == -1)
		return -EINVAL;

	/*
	 * We did not get any buffer from the preallocated pool. Let us try
	 * to allocate one new buffer. NOTE: This is a blocking call.
	 */
	pool = &adp->dma_pool_list[right_pool];

	spin_lock_irqsave(&pool->lock, flags);

	kioc->pool_index	= right_pool;
	kioc->free_buf		= 1;
	kioc->buf_vaddr		= pci_pool_alloc(pool->handle, GFP_KERNEL,
							&kioc->buf_paddr);
	spin_unlock_irqrestore(&pool->lock, flags);

	if (!kioc->buf_vaddr)
		return -ENOMEM;

	return 0;
}
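
/*
 * Worked example of the search above, assuming the usual values from
 * megaraid_mm.h (MRAID_MM_INIT_BUFF_SIZE of 4k, MAX_DMA_POOLS of 5, giving
 * 4k/8k/16k/32k/64k pools): a 6k request skips the 4k pool, takes the 8k
 * buffer if it is free, otherwise probes the 16k, 32k and 64k buffers, and
 * as a last resort allocates a fresh buffer from the 8k pci_pool with
 * free_buf set so that mraid_mm_dealloc_kioc() releases it.
 */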

/**
 * mraid_mm_alloc_kioc - Returns a uioc_t from free list
 * @adp	: Adapter softstate for this module
 *
 * The kioc_semaphore is initialized with number of kioc nodes in the
 * free kioc pool. If the kioc pool is empty, this function blocks till
 * a kioc becomes free.
 */
static uioc_t *
mraid_mm_alloc_kioc(mraid_mmadp_t *adp)
{
	uioc_t			*kioc;
	struct list_head	*head;
	unsigned long		flags;

	down(&adp->kioc_semaphore);

	spin_lock_irqsave(&adp->kioc_pool_lock, flags);

	head = &adp->kioc_pool;

	if (list_empty(head)) {
		up(&adp->kioc_semaphore);
		spin_unlock_irqrestore(&adp->kioc_pool_lock, flags);

		con_log(CL_ANN, ("megaraid cmm: kioc list empty!\n"));
		return NULL;
	}

	kioc = list_entry(head->next, uioc_t, list);
	list_del_init(&kioc->list);

	spin_unlock_irqrestore(&adp->kioc_pool_lock, flags);

	memset((caddr_t)(unsigned long)kioc->cmdbuf, 0, sizeof(mbox64_t));
	memset((caddr_t) kioc->pthru32, 0, sizeof(mraid_passthru_t));

	kioc->buf_vaddr		= NULL;
	kioc->buf_paddr		= 0;
	kioc->pool_index	= -1;
	kioc->free_buf		= 0;
	kioc->user_data		= NULL;
	kioc->user_data_len	= 0;
	kioc->user_pthru	= NULL;
	kioc->timedout		= 0;

	return kioc;
}

/**
 * mraid_mm_dealloc_kioc - Return kioc to free pool
 * @adp		: Adapter softstate
 * @kioc	: uioc_t node to be returned to free pool
 */
static void
mraid_mm_dealloc_kioc(mraid_mmadp_t *adp, uioc_t *kioc)
{
	mm_dmapool_t	*pool;
	unsigned long	flags;

	if (kioc->pool_index != -1) {
		pool = &adp->dma_pool_list[kioc->pool_index];

		/* This routine may be called in non-isr context also */
		spin_lock_irqsave(&pool->lock, flags);

		/*
		 * While attaching the dma buffer, if we didn't get the
		 * required buffer from the pool, we would have allocated
		 * it at the run time and set the free_buf flag. We must
		 * free that buffer. Otherwise, just mark that the buffer is
		 * not in use
		 */
		if (kioc->free_buf == 1)
			pci_pool_free(pool->handle, kioc->buf_vaddr,
							kioc->buf_paddr);
		else
			pool->in_use = 0;

		spin_unlock_irqrestore(&pool->lock, flags);
	}

	/* Return the kioc to the free pool */
	spin_lock_irqsave(&adp->kioc_pool_lock, flags);
	list_add(&kioc->list, &adp->kioc_pool);
	spin_unlock_irqrestore(&adp->kioc_pool_lock, flags);

	/* increment the free kioc count */
	up(&adp->kioc_semaphore);

	return;
}

/**
 * lld_ioctl - Routine to issue ioctl to low level drvr
 * @adp		: The adapter handle
 * @kioc	: The ioctl packet with kernel addresses
 */
static int
lld_ioctl(mraid_mmadp_t *adp, uioc_t *kioc)
{
	int			rval;
	struct timer_list	timer;
	struct timer_list	*tp = NULL;

	kioc->status	= -ENODATA;
	rval		= adp->issue_uioc(adp->drvr_data, kioc, IOCTL_ISSUE);

	if (rval) return rval;

	/*
	 * Start the timer
	 */
	if (adp->timeout > 0) {
		tp		= &timer;
		init_timer(tp);

		tp->function	= lld_timedout;
		tp->data	= (unsigned long)kioc;
		tp->expires	= jiffies + adp->timeout * HZ;

		add_timer(tp);
	}

	/*
	 * Wait till the low level driver completes the ioctl. After this
	 * call, the ioctl either completed successfully or timedout.
	 */
	wait_event(wait_q, (kioc->status != -ENODATA));
	if (tp) {
		del_timer_sync(tp);
	}

	/*
	 * If the command had timedout, we mark the controller offline
	 * before returning
	 */
	if (kioc->timedout) {
		adp->quiescent = 0;
	}

	return kioc->status;
}


/**
 * ioctl_done - callback from the low level driver
 * @kioc	: completed ioctl packet
 */
static void
ioctl_done(uioc_t *kioc)
{
	uint32_t	adapno;
	int		iterator;
	mraid_mmadp_t	*adapter;

	/*
	 * When the kioc returns from driver, make sure it still doesn't
	 * have ENODATA in status. Otherwise, driver will hang on wait_event
	 * forever
	 */
	if (kioc->status == -ENODATA) {
		con_log(CL_ANN, (KERN_WARNING
			"megaraid cmm: lld didn't change status!\n"));

		kioc->status = -EINVAL;
	}

	/*
	 * Check if this kioc was timedout before. If so, nobody is waiting
	 * on this kioc. We don't have to wake up anybody. Instead, we just
	 * have to free the kioc
	 */
	if (kioc->timedout) {
		iterator	= 0;
		adapter		= NULL;
		adapno		= kioc->adapno;

		con_log(CL_ANN, (KERN_WARNING "megaraid cmm: completed "
					"ioctl that was timedout before\n"));

		list_for_each_entry(adapter, &adapters_list_g, list) {
			if (iterator++ == adapno) break;
		}

		kioc->timedout = 0;

		if (adapter) {
			mraid_mm_dealloc_kioc(adapter, kioc);
		}
	}
	else {
		wake_up(&wait_q);
	}
}


/**
 * lld_timedout	- callback from the expired timer
 * @ptr		: ioctl packet that timed out
 */
static void
lld_timedout(unsigned long ptr)
{
	uioc_t *kioc	= (uioc_t *)ptr;

	kioc->status	= -ETIME;
	kioc->timedout	= 1;

	con_log(CL_ANN, (KERN_WARNING "megaraid cmm: ioctl timed out\n"));

	wake_up(&wait_q);
}
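
/*
 * Timeout handling in brief: lld_timedout() only marks the kioc and wakes
 * the sleeper; lld_ioctl() then returns -ETIME without freeing the kioc,
 * since the low level driver still owns the resources, and marks the
 * controller offline. When the low level driver eventually completes the
 * command, ioctl_done() sees kioc->timedout set and returns the kioc to
 * the free pool itself.
 */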


/**
 * kioc_to_mimd	- Converter from new back to old format
 * @kioc	: Kernel space IOCTL packet (successfully issued)
 * @mimd	: User space MIMD packet
 */
static int
kioc_to_mimd(uioc_t *kioc, mimd_t __user *mimd)
{
	mimd_t			kmimd;
	uint8_t			opcode;
	uint8_t			subopcode;

	mbox64_t		*mbox64;
	mraid_passthru_t	__user *upthru32;
	mraid_passthru_t	*kpthru32;
	mcontroller_t		cinfo;
	mraid_hba_info_t	*hinfo;


	if (copy_from_user(&kmimd, mimd, sizeof(mimd_t)))
		return (-EFAULT);

	opcode		= kmimd.ui.fcs.opcode;
	subopcode	= kmimd.ui.fcs.subopcode;

	if (opcode == 0x82) {
		switch (subopcode) {

		case MEGAIOC_QADAPINFO:

			hinfo = (mraid_hba_info_t *)(unsigned long)
					kioc->buf_vaddr;

			hinfo_to_cinfo(hinfo, &cinfo);

			if (copy_to_user(kmimd.data, &cinfo, sizeof(cinfo)))
				return (-EFAULT);

			return 0;

		default:
			return (-EINVAL);
		}

		return 0;
	}

	mbox64 = (mbox64_t *)(unsigned long)kioc->cmdbuf;

	if (kioc->user_pthru) {

		upthru32 = kioc->user_pthru;
		kpthru32 = kioc->pthru32;

		if (copy_to_user(&upthru32->scsistatus,
					&kpthru32->scsistatus,
					sizeof(uint8_t))) {
			return (-EFAULT);
		}
	}

	if (kioc->user_data) {
		if (copy_to_user(kioc->user_data, kioc->buf_vaddr,
					kioc->user_data_len)) {
			return (-EFAULT);
		}
	}

	if (copy_to_user(&mimd->mbox[17],
			&mbox64->mbox32.status, sizeof(uint8_t))) {
		return (-EFAULT);
	}

	return 0;
}


/**
 * hinfo_to_cinfo - Convert new format hba info into old format
 * @hinfo	: New format, more comprehensive adapter info
 * @cinfo	: Old format adapter info to support mimd_t apps
 */
static void
hinfo_to_cinfo(mraid_hba_info_t *hinfo, mcontroller_t *cinfo)
{
	if (!hinfo || !cinfo)
		return;

	cinfo->base		= hinfo->baseport;
	cinfo->irq		= hinfo->irq;
	cinfo->numldrv		= hinfo->num_ldrv;
	cinfo->pcibus		= hinfo->pci_bus;
	cinfo->pcidev		= hinfo->pci_slot;
	cinfo->pcifun		= PCI_FUNC(hinfo->pci_dev_fn);
	cinfo->pciid		= hinfo->pci_device_id;
	cinfo->pcivendor	= hinfo->pci_vendor_id;
	cinfo->pcislot		= hinfo->pci_slot;
	cinfo->uid		= hinfo->unique_id;
}


/**
 * mraid_mm_register_adp - Registration routine for low level drivers
 * @lld_adp	: Adapter object
 */
int
mraid_mm_register_adp(mraid_mmadp_t *lld_adp)
{
	mraid_mmadp_t	*adapter;
	mbox64_t	*mbox_list;
	uioc_t		*kioc;
	int		rval;
	int		i;


	if (lld_adp->drvr_type != DRVRTYPE_MBOX)
		return (-EINVAL);

	adapter = kzalloc(sizeof(mraid_mmadp_t), GFP_KERNEL);

	if (!adapter)
		return -ENOMEM;


	adapter->unique_id	= lld_adp->unique_id;
	adapter->drvr_type	= lld_adp->drvr_type;
	adapter->drvr_data	= lld_adp->drvr_data;
	adapter->pdev		= lld_adp->pdev;
	adapter->issue_uioc	= lld_adp->issue_uioc;
	adapter->timeout	= lld_adp->timeout;
	adapter->max_kioc	= lld_adp->max_kioc;
	adapter->quiescent	= 1;

	/*
	 * Allocate single blocks of memory for all required kiocs,
	 * mailboxes and passthru structures. kioc_list is zeroed so that
	 * the error path below can tell which entries own a pthru buffer.
	 */
	adapter->kioc_list	= kzalloc(sizeof(uioc_t) * lld_adp->max_kioc,
						GFP_KERNEL);
	adapter->mbox_list	= kmalloc(sizeof(mbox64_t) * lld_adp->max_kioc,
						GFP_KERNEL);
	adapter->pthru_dma_pool = pci_pool_create("megaraid mm pthru pool",
						adapter->pdev,
						sizeof(mraid_passthru_t),
						16, 0);

	if (!adapter->kioc_list || !adapter->mbox_list ||
			!adapter->pthru_dma_pool) {

		con_log(CL_ANN, (KERN_WARNING
			"megaraid cmm: out of memory, %s %d\n", __func__,
			__LINE__));

		rval = (-ENOMEM);

		goto memalloc_error;
	}

	/*
	 * Slice kioc_list and make a kioc_pool with the individual kiocs
	 */
	INIT_LIST_HEAD(&adapter->kioc_pool);
	spin_lock_init(&adapter->kioc_pool_lock);
	sema_init(&adapter->kioc_semaphore, lld_adp->max_kioc);

	mbox_list	= (mbox64_t *)adapter->mbox_list;

	for (i = 0; i < lld_adp->max_kioc; i++) {

		kioc		= adapter->kioc_list + i;
		kioc->cmdbuf	= (uint64_t)(unsigned long)(mbox_list + i);
		kioc->pthru32	= pci_pool_alloc(adapter->pthru_dma_pool,
						GFP_KERNEL, &kioc->pthru32_h);

		if (!kioc->pthru32) {

			con_log(CL_ANN, (KERN_WARNING
				"megaraid cmm: out of memory, %s %d\n",
					__func__, __LINE__));

			rval = (-ENOMEM);

			goto pthru_dma_pool_error;
		}

		list_add_tail(&kioc->list, &adapter->kioc_pool);
	}

	// Setup the dma pools for data buffers
	if ((rval = mraid_mm_setup_dma_pools(adapter)) != 0) {
		goto dma_pool_error;
	}

	list_add_tail(&adapter->list, &adapters_list_g);

	adapters_count_g++;

	return 0;

dma_pool_error:
	/* Do nothing */

pthru_dma_pool_error:

	for (i = 0; i < lld_adp->max_kioc; i++) {
		kioc = adapter->kioc_list + i;
		if (kioc->pthru32) {
			pci_pool_free(adapter->pthru_dma_pool, kioc->pthru32,
				kioc->pthru32_h);
		}
	}

memalloc_error:

	kfree(adapter->kioc_list);
	kfree(adapter->mbox_list);

	if (adapter->pthru_dma_pool)
		pci_pool_destroy(adapter->pthru_dma_pool);

	kfree(adapter);

	return rval;
}
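
/*
 * Illustrative sketch (not part of this module): how a low level driver is
 * expected to fill an mraid_mmadp_t and register with the routine above.
 * The callback name and numeric values are placeholders ("adapter" being
 * the low level driver's own softstate); megaraid_mbox has the real thing.
 *
 *	mraid_mmadp_t adp;
 *
 *	adp.unique_id	= adapter->unique_id;
 *	adp.drvr_type	= DRVRTYPE_MBOX;
 *	adp.drvr_data	= (unsigned long)adapter;
 *	adp.pdev	= adapter->pdev;
 *	adp.issue_uioc	= megaraid_mbox_mm_handler;	// LLD kioc entry point
 *	adp.timeout	= 300;				// seconds; 0 disables the timer
 *	adp.max_kioc	= 32;				// kiocs to pre-allocate
 *
 *	rval = mraid_mm_register_adp(&adp);
 */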


/**
 * mraid_mm_adapter_app_handle - return the application handle for this adapter
 * @unique_id	: adapter unique identifier
 *
 * For the given driver data, locate the adapter in our global list and
 * return the corresponding handle, which is also used by applications to
 * uniquely identify an adapter.
 *
 * Return adapter handle if found in the list.
 * Return 0 if adapter could not be located, should never happen though.
 */
uint32_t
mraid_mm_adapter_app_handle(uint32_t unique_id)
{
	mraid_mmadp_t	*adapter;
	mraid_mmadp_t	*tmp;
	int		index = 0;

	list_for_each_entry_safe(adapter, tmp, &adapters_list_g, list) {

		if (adapter->unique_id == unique_id) {

			return MKADAP(index);
		}

		index++;
	}

	return 0;
}


/**
 * mraid_mm_setup_dma_pools - Set up dma buffer pools per adapter
 * @adp	: Adapter softstate
 *
 * We maintain a pool of dma buffers per adapter. Each pool has one buffer.
 * E.g., we may have 5 dma pools - one each for 4k, 8k ... 64k buffers. We
 * have just one 4k buffer in the 4k pool, one 8k buffer in the 8k pool etc.
 * We don't want to waste too much memory by allocating more buffers per
 * pool.
 */
static int
mraid_mm_setup_dma_pools(mraid_mmadp_t *adp)
{
	mm_dmapool_t	*pool;
	int		bufsize;
	int		i;

	/*
	 * Create MAX_DMA_POOLS number of pools
	 */
	bufsize = MRAID_MM_INIT_BUFF_SIZE;

	for (i = 0; i < MAX_DMA_POOLS; i++) {

		pool = &adp->dma_pool_list[i];

		pool->buf_size = bufsize;
		spin_lock_init(&pool->lock);

		pool->handle = pci_pool_create("megaraid mm data buffer",
						adp->pdev, bufsize, 16, 0);

		if (!pool->handle) {
			goto dma_pool_setup_error;
		}

		pool->vaddr = pci_pool_alloc(pool->handle, GFP_KERNEL,
							&pool->paddr);

		if (!pool->vaddr)
			goto dma_pool_setup_error;

		bufsize = bufsize * 2;
	}

	return 0;

dma_pool_setup_error:

	mraid_mm_teardown_dma_pools(adp);
	return (-ENOMEM);
}


/**
 * mraid_mm_unregister_adp - Unregister routine for low level drivers
 * @unique_id	: UID of the adapter
 *
 * Assumes no outstanding ioctls to llds.
 */
int
mraid_mm_unregister_adp(uint32_t unique_id)
{
	mraid_mmadp_t	*adapter;
	mraid_mmadp_t	*tmp;

	list_for_each_entry_safe(adapter, tmp, &adapters_list_g, list) {


		if (adapter->unique_id == unique_id) {

			adapters_count_g--;

			list_del_init(&adapter->list);

			mraid_mm_free_adp_resources(adapter);

			kfree(adapter);

			con_log(CL_ANN, (
				"megaraid cmm: Unregistered one adapter:%#x\n",
				unique_id));

			return 0;
		}
	}

	return (-ENODEV);
}

/**
 * mraid_mm_free_adp_resources - Free adapter softstate
 * @adp	: Adapter softstate
 */
static void
mraid_mm_free_adp_resources(mraid_mmadp_t *adp)
{
	uioc_t	*kioc;
	int	i;

	mraid_mm_teardown_dma_pools(adp);

	for (i = 0; i < adp->max_kioc; i++) {

		kioc = adp->kioc_list + i;

		pci_pool_free(adp->pthru_dma_pool, kioc->pthru32,
				kioc->pthru32_h);
	}

	kfree(adp->kioc_list);
	kfree(adp->mbox_list);

	pci_pool_destroy(adp->pthru_dma_pool);


	return;
}


/**
 * mraid_mm_teardown_dma_pools - Free all per adapter dma buffers
 * @adp	: Adapter softstate
 */
static void
mraid_mm_teardown_dma_pools(mraid_mmadp_t *adp)
{
	int		i;
	mm_dmapool_t	*pool;

	for (i = 0; i < MAX_DMA_POOLS; i++) {

		pool = &adp->dma_pool_list[i];

		if (pool->handle) {

			if (pool->vaddr)
				pci_pool_free(pool->handle, pool->vaddr,
							pool->paddr);

			pci_pool_destroy(pool->handle);
			pool->handle = NULL;
		}
	}

	return;
}

/**
 * mraid_mm_init	- Module entry point
 */
static int __init
mraid_mm_init(void)
{
	int err;

	// Announce the driver version
	con_log(CL_ANN, (KERN_INFO "megaraid cmm: %s %s\n",
		LSI_COMMON_MOD_VERSION, LSI_COMMON_MOD_EXT_VERSION));

	err = misc_register(&megaraid_mm_dev);
	if (err < 0) {
		con_log(CL_ANN, ("megaraid cmm: cannot register misc device\n"));
		return err;
	}

	init_waitqueue_head(&wait_q);

	INIT_LIST_HEAD(&adapters_list_g);

	return 0;
}


#ifdef CONFIG_COMPAT
/**
 * mraid_mm_compat_ioctl	- 32bit to 64bit ioctl conversion routine
 * @filep	: file operations pointer (ignored)
 * @cmd		: ioctl command
 * @arg		: user ioctl packet
 */
static long
mraid_mm_compat_ioctl(struct file *filep, unsigned int cmd,
		      unsigned long arg)
{
	int err;

	err = mraid_mm_ioctl(filep, cmd, arg);

	return err;
}
#endif

/**
 * mraid_mm_exit	- Module exit point
 */
static void __exit
mraid_mm_exit(void)
{
	con_log(CL_DLEVEL1 , ("exiting common mod\n"));

	misc_deregister(&megaraid_mm_dev);
}

module_init(mraid_mm_init);
module_exit(mraid_mm_exit);

/* vi: set ts=8 sw=8 tw=78: */