/*
 * Disk Array driver for HP Smart Array controllers.
 * (C) Copyright 2000, 2007 Hewlett-Packard Development Company, L.P.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
 * 02111-1307, USA.
 *
 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
 *
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/major.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/blkpg.h>
#include <linux/timer.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/hdreg.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/mutex.h>
#include <linux/bitmap.h>
#include <linux/io.h>
#include <asm/uaccess.h>

#include <linux/dma-mapping.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/completion.h>
#include <scsi/scsi.h>
#include <scsi/sg.h>
#include <scsi/scsi_ioctl.h>
#include <linux/cdrom.h>
#include <linux/scatterlist.h>
#include <linux/kthread.h>

#define CCISS_DRIVER_VERSION(maj, min, submin) ((maj << 16) | (min << 8) | (submin))
#define DRIVER_NAME "HP CISS Driver (v 3.6.26)"
#define DRIVER_VERSION CCISS_DRIVER_VERSION(3, 6, 26)

/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Smart Array Controllers");
MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
MODULE_VERSION("3.6.26");
MODULE_LICENSE("GPL");
static int cciss_tape_cmds = 6;
module_param(cciss_tape_cmds, int, 0644);
MODULE_PARM_DESC(cciss_tape_cmds,
	"number of commands to allocate for tape devices (default: 6)");
static int cciss_simple_mode;
module_param(cciss_simple_mode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(cciss_simple_mode,
	"Use 'simple mode' rather than 'performant mode'");

static int cciss_allow_hpsa;
module_param(cciss_allow_hpsa, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(cciss_allow_hpsa,
	"Prevent cciss driver from accessing hardware known to be "
	"supported by the hpsa driver");

static DEFINE_MUTEX(cciss_mutex);
static struct proc_dir_entry *proc_cciss;

#include "cciss_cmd.h"
#include "cciss.h"
#include <linux/cciss_ioctl.h>

/* define the PCI info for the cards we can control */
static const struct pci_device_id cciss_pci_device_id[] = {
	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISS, 0x0E11, 0x4070},
	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4080},
	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4082},
	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4083},
	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x4091},
	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409A},
	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409B},
	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409C},
	{PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409D},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSA, 0x103C, 0x3225},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3223},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3234},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3235},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3211},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3212},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3213},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3214},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3215},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3237},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x323D},
	{0,}
};

MODULE_DEVICE_TABLE(pci, cciss_pci_device_id);

/* board_id = Subsystem Device ID & Vendor ID
 * product = Marketing Name for the board
 * access = Address of the struct of function pointers
 */
static struct board_type products[] = {
	{0x40700E11, "Smart Array 5300", &SA5_access},
	{0x40800E11, "Smart Array 5i", &SA5B_access},
	{0x40820E11, "Smart Array 532", &SA5B_access},
	{0x40830E11, "Smart Array 5312", &SA5B_access},
	{0x409A0E11, "Smart Array 641", &SA5_access},
	{0x409B0E11, "Smart Array 642", &SA5_access},
	{0x409C0E11, "Smart Array 6400", &SA5_access},
	{0x409D0E11, "Smart Array 6400 EM", &SA5_access},
	{0x40910E11, "Smart Array 6i", &SA5_access},
	{0x3225103C, "Smart Array P600", &SA5_access},
	{0x3223103C, "Smart Array P800", &SA5_access},
	{0x3234103C, "Smart Array P400", &SA5_access},
	{0x3235103C, "Smart Array P400i", &SA5_access},
	{0x3211103C, "Smart Array E200i", &SA5_access},
	{0x3212103C, "Smart Array E200", &SA5_access},
	{0x3213103C, "Smart Array E200i", &SA5_access},
	{0x3214103C, "Smart Array E200i", &SA5_access},
	{0x3215103C, "Smart Array E200i", &SA5_access},
	{0x3237103C, "Smart Array E500", &SA5_access},
	{0x323D103C, "Smart Array P700m", &SA5_access},
};

/* How long to wait (in milliseconds) for board to go into simple mode */
#define MAX_CONFIG_WAIT 30000
#define MAX_IOCTL_CONFIG_WAIT 1000

/* define how many times we will try a command because of bus resets */
#define MAX_CMD_RETRIES 3

#define MAX_CTLR 32

/* Originally the cciss driver only supported 8 major numbers */
#define MAX_CTLR_ORIG 8

static ctlr_info_t *hba[MAX_CTLR];

static struct task_struct *cciss_scan_thread;
static DEFINE_MUTEX(scan_mutex);
static LIST_HEAD(scan_q);

static void do_cciss_request(struct request_queue *q);
static irqreturn_t do_cciss_intx(int irq, void *dev_id);
static irqreturn_t do_cciss_msix_intr(int irq, void *dev_id);
static int cciss_open(struct block_device *bdev, fmode_t mode);
static int cciss_unlocked_open(struct block_device *bdev, fmode_t mode);
static void cciss_release(struct gendisk *disk, fmode_t mode);
static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
		       unsigned int cmd, unsigned long arg);
static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo);

static int cciss_revalidate(struct gendisk *disk);
static int rebuild_lun_table(ctlr_info_t *h, int first_time, int via_ioctl);
static int deregister_disk(ctlr_info_t *h, int drv_index,
			   int clear_all, int via_ioctl);

static void cciss_read_capacity(ctlr_info_t *h, int logvol,
			sector_t *total_size, unsigned int *block_size);
static void cciss_read_capacity_16(ctlr_info_t *h, int logvol,
			sector_t *total_size, unsigned int *block_size);
static void cciss_geometry_inquiry(ctlr_info_t *h, int logvol,
			sector_t total_size,
			unsigned int block_size, InquiryData_struct *inq_buff,
			drive_info_struct *drv);
static void cciss_interrupt_mode(ctlr_info_t *);
static int cciss_enter_simple_mode(struct ctlr_info *h);
static void start_io(ctlr_info_t *h);
static int sendcmd_withirq(ctlr_info_t *h, __u8 cmd, void *buff, size_t size,
			__u8 page_code, unsigned char scsi3addr[],
			int cmd_type);
static int sendcmd_withirq_core(ctlr_info_t *h, CommandList_struct *c,
	int attempt_retry);
static int process_sendcmd_error(ctlr_info_t *h, CommandList_struct *c);

static int add_to_scan_list(struct ctlr_info *h);
static int scan_thread(void *data);
static int check_for_unit_attention(ctlr_info_t *h, CommandList_struct *c);
static void cciss_hba_release(struct device *dev);
static void cciss_device_release(struct device *dev);
static void cciss_free_gendisk(ctlr_info_t *h, int drv_index);
static void cciss_free_drive_info(ctlr_info_t *h, int drv_index);
static inline u32 next_command(ctlr_info_t *h);
static int cciss_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
				u32 *cfg_base_addr, u64 *cfg_base_addr_index,
				u64 *cfg_offset);
static int cciss_pci_find_memory_BAR(struct pci_dev *pdev,
				     unsigned long *memory_bar);
static inline u32 cciss_tag_discard_error_bits(ctlr_info_t *h, u32 tag);
static int write_driver_ver_to_cfgtable(CfgTable_struct __iomem *cfgtable);

/* performant mode helper functions */
static void calc_bucket_map(int *bucket, int num_buckets, int nsgs,
			    int *bucket_map);
static void cciss_put_controller_into_performant_mode(ctlr_info_t *h);

#ifdef CONFIG_PROC_FS
static void cciss_procinit(ctlr_info_t *h);
#else
static void cciss_procinit(ctlr_info_t *h)
{
}
#endif				/* CONFIG_PROC_FS */

#ifdef CONFIG_COMPAT
static int cciss_compat_ioctl(struct block_device *, fmode_t,
			      unsigned, unsigned long);
#endif

static const struct block_device_operations cciss_fops = {
	.owner = THIS_MODULE,
	.open = cciss_unlocked_open,
	.release = cciss_release,
	.ioctl = cciss_ioctl,
	.getgeo = cciss_getgeo,
#ifdef CONFIG_COMPAT
	.compat_ioctl = cciss_compat_ioctl,
#endif
	.revalidate_disk = cciss_revalidate,
};

/* set_performant_mode: Modify the tag for cciss performant
 * set bit 0 for pull model, bits 3-1 for block fetch
 * register number
 */
static void set_performant_mode(ctlr_info_t *h, CommandList_struct *c)
{
	if (likely(h->transMethod & CFGTBL_Trans_Performant))
		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
}

/*
 * Enqueuing and dequeuing functions for cmdlists.
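 * enqueue_cmd_and_start_io() below calls addQ() under h->lock, and
 * removeQ() is likewise expected to run with that lock held.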
 */
static inline void addQ(struct list_head *list, CommandList_struct *c)
{
	list_add_tail(&c->list, list);
}

static inline void removeQ(CommandList_struct *c)
{
	/*
	 * After kexec/dump some commands might still
	 * be in flight, which the firmware will try
	 * to complete. Resetting the firmware doesn't work
	 * with old fw revisions, so we have to mark
	 * them off as 'stale' to prevent the driver from
	 * falling over.
	 */
	if (WARN_ON(list_empty(&c->list))) {
		c->cmd_type = CMD_MSG_STALE;
		return;
	}

	list_del_init(&c->list);
}

static void enqueue_cmd_and_start_io(ctlr_info_t *h,
	CommandList_struct *c)
{
	unsigned long flags;
	set_performant_mode(h, c);
	spin_lock_irqsave(&h->lock, flags);
	addQ(&h->reqQ, c);
	h->Qdepth++;
	if (h->Qdepth > h->maxQsinceinit)
		h->maxQsinceinit = h->Qdepth;
	start_io(h);
	spin_unlock_irqrestore(&h->lock, flags);
}

static void cciss_free_sg_chain_blocks(SGDescriptor_struct **cmd_sg_list,
	int nr_cmds)
{
	int i;

	if (!cmd_sg_list)
		return;
	for (i = 0; i < nr_cmds; i++) {
		kfree(cmd_sg_list[i]);
		cmd_sg_list[i] = NULL;
	}
	kfree(cmd_sg_list);
}

static SGDescriptor_struct **cciss_allocate_sg_chain_blocks(
	ctlr_info_t *h, int chainsize, int nr_cmds)
{
	int j;
	SGDescriptor_struct **cmd_sg_list;

	if (chainsize <= 0)
		return NULL;

	cmd_sg_list = kmalloc(sizeof(*cmd_sg_list) * nr_cmds, GFP_KERNEL);
	if (!cmd_sg_list)
		return NULL;

	/* Build up chain blocks for each command */
	for (j = 0; j < nr_cmds; j++) {
		/* Need a block of chainsized s/g elements. */
		cmd_sg_list[j] = kmalloc((chainsize *
			sizeof(*cmd_sg_list[j])), GFP_KERNEL);
		if (!cmd_sg_list[j]) {
			dev_err(&h->pdev->dev, "Cannot get memory "
				"for s/g chains.\n");
			goto clean;
		}
	}
	return cmd_sg_list;
clean:
	cciss_free_sg_chain_blocks(cmd_sg_list, nr_cmds);
	return NULL;
}

static void cciss_unmap_sg_chain_block(ctlr_info_t *h, CommandList_struct *c)
{
	SGDescriptor_struct *chain_sg;
	u64bit temp64;

	if (c->Header.SGTotal <= h->max_cmd_sgentries)
		return;

	chain_sg = &c->SG[h->max_cmd_sgentries - 1];
	temp64.val32.lower = chain_sg->Addr.lower;
	temp64.val32.upper = chain_sg->Addr.upper;
	pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE);
}

static void cciss_map_sg_chain_block(ctlr_info_t *h, CommandList_struct *c,
	SGDescriptor_struct *chain_block, int len)
{
	SGDescriptor_struct *chain_sg;
	u64bit temp64;

	chain_sg = &c->SG[h->max_cmd_sgentries - 1];
	chain_sg->Ext = CCISS_SG_CHAIN;
	chain_sg->Len = len;
	temp64.val = pci_map_single(h->pdev, chain_block, len,
				PCI_DMA_TODEVICE);
	chain_sg->Addr.lower = temp64.val32.lower;
	chain_sg->Addr.upper = temp64.val32.upper;
}

#include "cciss_scsi.c"		/* For SCSI tape support */

static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
	"UNKNOWN"
};
#define RAID_UNKNOWN (ARRAY_SIZE(raid_label)-1)

#ifdef CONFIG_PROC_FS

/*
 * Report information about this controller.
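 *
 * Example /proc/driver/cciss/cciss0 header, with illustrative values,
 * matching the seq_printf() format in cciss_seq_show_header() below:
 *
 *	cciss0: HP Smart Array P400 Controller
 *	Board ID: 0x3234103c
 *	Firmware Version: 7.22
 *	IRQ: 16
 *	Logical drives: 1
 *	Current Q depth: 0
 *	Current # commands on controller: 0
 *	Max Q depth since init: 12
 *	Max # commands on controller since init: 14
 *	Max SG entries since init: 31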
 */
#define ENG_GIG 1000000000
#define ENG_GIG_FACTOR (ENG_GIG/512)
#define ENGAGE_SCSI "engage scsi"

static void cciss_seq_show_header(struct seq_file *seq)
{
	ctlr_info_t *h = seq->private;

	seq_printf(seq, "%s: HP %s Controller\n"
		"Board ID: 0x%08lx\n"
		"Firmware Version: %c%c%c%c\n"
		"IRQ: %d\n"
		"Logical drives: %d\n"
		"Current Q depth: %d\n"
		"Current # commands on controller: %d\n"
		"Max Q depth since init: %d\n"
		"Max # commands on controller since init: %d\n"
		"Max SG entries since init: %d\n",
		h->devname,
		h->product_name,
		(unsigned long)h->board_id,
		h->firm_ver[0], h->firm_ver[1], h->firm_ver[2],
		h->firm_ver[3], (unsigned int)h->intr[h->intr_mode],
		h->num_luns,
		h->Qdepth, h->commands_outstanding,
		h->maxQsinceinit, h->max_outstanding, h->maxSG);

#ifdef CONFIG_CISS_SCSI_TAPE
	cciss_seq_tape_report(seq, h);
#endif /* CONFIG_CISS_SCSI_TAPE */
}

static void *cciss_seq_start(struct seq_file *seq, loff_t *pos)
{
	ctlr_info_t *h = seq->private;
	unsigned long flags;

	/* prevent displaying bogus info during configuration
	 * or deconfiguration of a logical volume
	 */
	spin_lock_irqsave(&h->lock, flags);
	if (h->busy_configuring) {
		spin_unlock_irqrestore(&h->lock, flags);
		return ERR_PTR(-EBUSY);
	}
	h->busy_configuring = 1;
	spin_unlock_irqrestore(&h->lock, flags);

	if (*pos == 0)
		cciss_seq_show_header(seq);

	return pos;
}

static int cciss_seq_show(struct seq_file *seq, void *v)
{
	sector_t vol_sz, vol_sz_frac;
	ctlr_info_t *h = seq->private;
	unsigned ctlr = h->ctlr;
	loff_t *pos = v;
	drive_info_struct *drv = h->drv[*pos];

	if (*pos > h->highest_lun)
		return 0;

	if (drv == NULL)	/* it's possible for h->drv[] to have holes. */
		return 0;

	if (drv->heads == 0)
		return 0;

	vol_sz = drv->nr_blocks;
	vol_sz_frac = sector_div(vol_sz, ENG_GIG_FACTOR);
	vol_sz_frac *= 100;
	sector_div(vol_sz_frac, ENG_GIG_FACTOR);

	if (drv->raid_level < 0 || drv->raid_level > RAID_UNKNOWN)
		drv->raid_level = RAID_UNKNOWN;
	seq_printf(seq, "cciss/c%dd%d:"
		"\t%4u.%02uGB\tRAID %s\n",
		ctlr, (int) *pos, (int)vol_sz, (int)vol_sz_frac,
		raid_label[drv->raid_level]);
	return 0;
}

static void *cciss_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	ctlr_info_t *h = seq->private;

	if (*pos > h->highest_lun)
		return NULL;
	*pos += 1;

	return pos;
}

static void cciss_seq_stop(struct seq_file *seq, void *v)
{
	ctlr_info_t *h = seq->private;

	/* Only reset h->busy_configuring if we succeeded in setting
	 * it during cciss_seq_start.
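	 * (seq_read() calls ->stop() even when ->start() returned an
	 * ERR_PTR, which is how v can be ERR_PTR(-EBUSY) here.)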
	 */
	if (v == ERR_PTR(-EBUSY))
		return;

	h->busy_configuring = 0;
}

static const struct seq_operations cciss_seq_ops = {
	.start = cciss_seq_start,
	.show = cciss_seq_show,
	.next = cciss_seq_next,
	.stop = cciss_seq_stop,
};

static int cciss_seq_open(struct inode *inode, struct file *file)
{
	int ret = seq_open(file, &cciss_seq_ops);
	struct seq_file *seq = file->private_data;

	if (!ret)
		seq->private = PDE_DATA(inode);

	return ret;
}

static ssize_t
cciss_proc_write(struct file *file, const char __user *buf,
		 size_t length, loff_t *ppos)
{
	int err;
	char *buffer;

#ifndef CONFIG_CISS_SCSI_TAPE
	return -EINVAL;
#endif

	if (!buf || length > PAGE_SIZE - 1)
		return -EINVAL;

	buffer = (char *)__get_free_page(GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	err = -EFAULT;
	if (copy_from_user(buffer, buf, length))
		goto out;
	buffer[length] = '\0';

#ifdef CONFIG_CISS_SCSI_TAPE
	if (strncmp(ENGAGE_SCSI, buffer, sizeof ENGAGE_SCSI - 1) == 0) {
		struct seq_file *seq = file->private_data;
		ctlr_info_t *h = seq->private;

		err = cciss_engage_scsi(h);
		if (err == 0)
			err = length;
	} else
#endif /* CONFIG_CISS_SCSI_TAPE */
		err = -EINVAL;
	/* might be nice to have "disengage" too, but it's not
	   safely possible. (only 1 module use count, lock issues.) */

out:
	free_page((unsigned long)buffer);
	return err;
}

static const struct file_operations cciss_proc_fops = {
	.owner	 = THIS_MODULE,
	.open    = cciss_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
	.write   = cciss_proc_write,
};

static void cciss_procinit(ctlr_info_t *h)
{
	struct proc_dir_entry *pde;

	if (proc_cciss == NULL)
		proc_cciss = proc_mkdir("driver/cciss", NULL);
	if (!proc_cciss)
		return;
	pde = proc_create_data(h->devname, S_IWUSR | S_IRUSR | S_IRGRP |
			       S_IROTH, proc_cciss,
			       &cciss_proc_fops, h);
}
#endif				/* CONFIG_PROC_FS */

#define MAX_PRODUCT_NAME_LEN 19

#define to_hba(n) container_of(n, struct ctlr_info, dev)
#define to_drv(n) container_of(n, drive_info_struct, dev)

/* List of controllers which cannot be hard reset on kexec with reset_devices */
static u32 unresettable_controller[] = {
	0x324a103C, /* Smart Array P712m */
	0x324b103C, /* Smart Array P711m */
	0x3223103C, /* Smart Array P800 */
	0x3234103C, /* Smart Array P400 */
	0x3235103C, /* Smart Array P400i */
	0x3211103C, /* Smart Array E200i */
	0x3212103C, /* Smart Array E200 */
	0x3213103C, /* Smart Array E200i */
	0x3214103C, /* Smart Array E200i */
	0x3215103C, /* Smart Array E200i */
	0x3237103C, /* Smart Array E500 */
	0x323D103C, /* Smart Array P700m */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
};

/* List of controllers which cannot even be soft reset */
static u32 soft_unresettable_controller[] = {
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
};

static int ctlr_is_hard_resettable(u32 board_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++)
		if (unresettable_controller[i] == board_id)
			return 0;
	return 1;
}

static int ctlr_is_soft_resettable(u32 board_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(soft_unresettable_controller); i++)
		if (soft_unresettable_controller[i] == board_id)
			return 0;
	return 1;
}

static int ctlr_is_resettable(u32 board_id)
{
	return ctlr_is_hard_resettable(board_id) ||
		ctlr_is_soft_resettable(board_id);
}

static ssize_t host_show_resettable(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct ctlr_info *h = to_hba(dev);

	return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
}
static DEVICE_ATTR(resettable, S_IRUGO, host_show_resettable, NULL);

static ssize_t host_store_rescan(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct ctlr_info *h = to_hba(dev);

	add_to_scan_list(h);
	wake_up_process(cciss_scan_thread);
	wait_for_completion_interruptible(&h->scan_wait);

	return count;
}
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);

static ssize_t host_show_transport_mode(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct ctlr_info *h = to_hba(dev);

	return snprintf(buf, 20, "%s\n",
			h->transMethod & CFGTBL_Trans_Performant ?
			"performant" : "simple");
}
static DEVICE_ATTR(transport_mode, S_IRUGO, host_show_transport_mode, NULL);

static ssize_t dev_show_unique_id(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	drive_info_struct *drv = to_drv(dev);
	struct ctlr_info *h = to_hba(drv->dev.parent);
	__u8 sn[16];
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&h->lock, flags);
	if (h->busy_configuring)
		ret = -EBUSY;
	else
		memcpy(sn, drv->serial_no, sizeof(sn));
	spin_unlock_irqrestore(&h->lock, flags);

	if (ret)
		return ret;
	else
		return snprintf(buf, 16 * 2 + 2,
				"%02X%02X%02X%02X%02X%02X%02X%02X"
				"%02X%02X%02X%02X%02X%02X%02X%02X\n",
				sn[0], sn[1], sn[2], sn[3],
				sn[4], sn[5], sn[6], sn[7],
				sn[8], sn[9], sn[10], sn[11],
				sn[12], sn[13], sn[14], sn[15]);
}
static DEVICE_ATTR(unique_id, S_IRUGO, dev_show_unique_id, NULL);

static ssize_t dev_show_vendor(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	drive_info_struct *drv = to_drv(dev);
	struct ctlr_info *h = to_hba(drv->dev.parent);
	char vendor[VENDOR_LEN + 1];
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&h->lock, flags);
	if (h->busy_configuring)
		ret = -EBUSY;
	else
		memcpy(vendor, drv->vendor, VENDOR_LEN + 1);
	spin_unlock_irqrestore(&h->lock, flags);

	if (ret)
		return ret;
	else
		return snprintf(buf, sizeof(vendor) + 1, "%s\n", vendor);
}
static DEVICE_ATTR(vendor, S_IRUGO, dev_show_vendor, NULL);

static ssize_t dev_show_model(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	drive_info_struct *drv = to_drv(dev);
	struct ctlr_info *h = to_hba(drv->dev.parent);
	char model[MODEL_LEN + 1];
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&h->lock, flags);
	if (h->busy_configuring)
		ret = -EBUSY;
	else
		memcpy(model, drv->model, MODEL_LEN + 1);
	spin_unlock_irqrestore(&h->lock, flags);

	if (ret)
		return ret;
	else
		return snprintf(buf, sizeof(model) + 1, "%s\n", model);
}
static DEVICE_ATTR(model, S_IRUGO, dev_show_model, NULL);

static ssize_t dev_show_rev(struct device *dev,
			    struct device_attribute *attr,
			    char *buf)
{
	drive_info_struct *drv = to_drv(dev);
	struct ctlr_info *h = to_hba(drv->dev.parent);
	char rev[REV_LEN + 1];
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&h->lock, flags);
	if (h->busy_configuring)
		ret = -EBUSY;
	else
		memcpy(rev, drv->rev, REV_LEN + 1);
	spin_unlock_irqrestore(&h->lock, flags);

	if (ret)
		return ret;
	else
		return snprintf(buf, sizeof(rev) + 1, "%s\n", rev);
}
static DEVICE_ATTR(rev, S_IRUGO, dev_show_rev, NULL);

static ssize_t cciss_show_lunid(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	drive_info_struct *drv = to_drv(dev);
	struct ctlr_info *h = to_hba(drv->dev.parent);
	unsigned long flags;
	unsigned char lunid[8];

	spin_lock_irqsave(&h->lock, flags);
	if (h->busy_configuring) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -EBUSY;
	}
	if (!drv->heads) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENOTTY;
	}
	memcpy(lunid, drv->LunID, sizeof(lunid));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
		lunid[0], lunid[1], lunid[2], lunid[3],
		lunid[4], lunid[5], lunid[6], lunid[7]);
}
static DEVICE_ATTR(lunid, S_IRUGO, cciss_show_lunid, NULL);

static ssize_t cciss_show_raid_level(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	drive_info_struct *drv = to_drv(dev);
	struct ctlr_info *h = to_hba(drv->dev.parent);
	int raid;
	unsigned long flags;

	spin_lock_irqsave(&h->lock, flags);
	if (h->busy_configuring) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -EBUSY;
	}
	raid = drv->raid_level;
	spin_unlock_irqrestore(&h->lock, flags);
	if (raid < 0 || raid > RAID_UNKNOWN)
		raid = RAID_UNKNOWN;

	return snprintf(buf, strlen(raid_label[raid]) + 7, "RAID %s\n",
			raid_label[raid]);
}
static DEVICE_ATTR(raid_level, S_IRUGO, cciss_show_raid_level, NULL);

static ssize_t cciss_show_usage_count(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	drive_info_struct *drv = to_drv(dev);
	struct ctlr_info *h = to_hba(drv->dev.parent);
	unsigned long flags;
	int count;

	spin_lock_irqsave(&h->lock, flags);
	if (h->busy_configuring) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -EBUSY;
	}
	count = drv->usage_count;
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "%d\n", count);
}
static DEVICE_ATTR(usage_count, S_IRUGO, cciss_show_usage_count, NULL);

static struct attribute *cciss_host_attrs[] = {
	&dev_attr_rescan.attr,
	&dev_attr_resettable.attr,
	&dev_attr_transport_mode.attr,
	NULL
};

static struct attribute_group cciss_host_attr_group = {
	.attrs = cciss_host_attrs,
};

static const struct attribute_group *cciss_host_attr_groups[] = {
	&cciss_host_attr_group,
	NULL
};

static struct device_type cciss_host_type = {
	.name		= "cciss_host",
	.groups		= cciss_host_attr_groups,
	.release	= cciss_hba_release,
};

static struct attribute *cciss_dev_attrs[] = {
	&dev_attr_unique_id.attr,
	&dev_attr_model.attr,
	&dev_attr_vendor.attr,
	&dev_attr_rev.attr,
	&dev_attr_lunid.attr,
	&dev_attr_raid_level.attr,
	&dev_attr_usage_count.attr,
	NULL
};

static struct attribute_group cciss_dev_attr_group = {
	.attrs = cciss_dev_attrs,
};

static const struct attribute_group *cciss_dev_attr_groups[] = {
	&cciss_dev_attr_group,
	NULL
};

static struct device_type cciss_dev_type = {
	.name		= "cciss_device",
	.groups		= cciss_dev_attr_groups,
	.release	= cciss_device_release,
};

static struct bus_type cciss_bus_type = {
	.name		= "cciss",
};

/*
 * cciss_hba_release is called when the reference count
 * of h->dev goes to zero.
 */
static void cciss_hba_release(struct device *dev)
{
	/*
	 * nothing to do, but need this to avoid a warning
	 * about not having a release handler from lib/kref.c.
	 */
}

/*
 * Initialize sysfs entry for each controller. This sets up and registers
 * the 'cciss#' directory for each individual controller under
 * /sys/bus/pci/devices/<dev>/.
 */
static int cciss_create_hba_sysfs_entry(struct ctlr_info *h)
{
	device_initialize(&h->dev);
	h->dev.type = &cciss_host_type;
	h->dev.bus = &cciss_bus_type;
	dev_set_name(&h->dev, "%s", h->devname);
	h->dev.parent = &h->pdev->dev;

	return device_add(&h->dev);
}

/*
 * Remove sysfs entries for an hba.
 */
static void cciss_destroy_hba_sysfs_entry(struct ctlr_info *h)
{
	device_del(&h->dev);
	put_device(&h->dev); /* final put. */
}

/* cciss_device_release is called when the reference count
 * of h->drv[x]->dev goes to zero.
 */
static void cciss_device_release(struct device *dev)
{
	drive_info_struct *drv = to_drv(dev);
	kfree(drv);
}

/*
 * Initialize sysfs for each logical drive. This sets up and registers
 * the 'c#d#' directory for each individual logical drive under
 * /sys/bus/pci/devices/<dev>/cciss#/. We also create a link from
 * /sys/block/cciss!c#d# to this entry.
 */
static long cciss_create_ld_sysfs_entry(struct ctlr_info *h,
	int drv_index)
{
	struct device *dev;

	if (h->drv[drv_index]->device_initialized)
		return 0;

	dev = &h->drv[drv_index]->dev;
	device_initialize(dev);
	dev->type = &cciss_dev_type;
	dev->bus = &cciss_bus_type;
	dev_set_name(dev, "c%dd%d", h->ctlr, drv_index);
	dev->parent = &h->dev;
	h->drv[drv_index]->device_initialized = 1;
	return device_add(dev);
}

/*
 * Remove sysfs entries for a logical drive.
 */
static void cciss_destroy_ld_sysfs_entry(struct ctlr_info *h, int drv_index,
	int ctlr_exiting)
{
	struct device *dev = &h->drv[drv_index]->dev;

	/* special case for c*d0, we only destroy it on controller exit */
	if (drv_index == 0 && !ctlr_exiting)
		return;

	device_del(dev);
	put_device(dev); /* the "final" put. */
	h->drv[drv_index] = NULL;
}

/*
 * For operations that cannot sleep, a command block is allocated at init,
 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
 * which ones are free or in use.
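 *
 * The find_first_zero_bit()/test_and_set_bit() loop below retries until a
 * free slot is claimed atomically, so two concurrent allocators can never
 * end up owning the same command block.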
 */
static CommandList_struct *cmd_alloc(ctlr_info_t *h)
{
	CommandList_struct *c;
	int i;
	u64bit temp64;
	dma_addr_t cmd_dma_handle, err_dma_handle;

	do {
		i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
		if (i == h->nr_cmds)
			return NULL;
	} while (test_and_set_bit(i, h->cmd_pool_bits) != 0);
	c = h->cmd_pool + i;
	memset(c, 0, sizeof(CommandList_struct));
	cmd_dma_handle = h->cmd_pool_dhandle + i * sizeof(CommandList_struct);
	c->err_info = h->errinfo_pool + i;
	memset(c->err_info, 0, sizeof(ErrorInfo_struct));
	err_dma_handle = h->errinfo_pool_dhandle
	    + i * sizeof(ErrorInfo_struct);
	h->nr_allocs++;

	c->cmdindex = i;

	INIT_LIST_HEAD(&c->list);
	c->busaddr = (__u32) cmd_dma_handle;
	temp64.val = (__u64) err_dma_handle;
	c->ErrDesc.Addr.lower = temp64.val32.lower;
	c->ErrDesc.Addr.upper = temp64.val32.upper;
	c->ErrDesc.Len = sizeof(ErrorInfo_struct);

	c->ctlr = h->ctlr;
	return c;
}

/* allocate a command using pci_alloc_consistent, used for ioctls,
 * etc., not for the main i/o path.
 */
static CommandList_struct *cmd_special_alloc(ctlr_info_t *h)
{
	CommandList_struct *c;
	u64bit temp64;
	dma_addr_t cmd_dma_handle, err_dma_handle;

	c = pci_zalloc_consistent(h->pdev, sizeof(CommandList_struct),
				  &cmd_dma_handle);
	if (c == NULL)
		return NULL;

	c->cmdindex = -1;

	c->err_info = pci_zalloc_consistent(h->pdev, sizeof(ErrorInfo_struct),
					    &err_dma_handle);

	if (c->err_info == NULL) {
		pci_free_consistent(h->pdev,
			sizeof(CommandList_struct), c, cmd_dma_handle);
		return NULL;
	}

	INIT_LIST_HEAD(&c->list);
	c->busaddr = (__u32) cmd_dma_handle;
	temp64.val = (__u64) err_dma_handle;
	c->ErrDesc.Addr.lower = temp64.val32.lower;
	c->ErrDesc.Addr.upper = temp64.val32.upper;
	c->ErrDesc.Len = sizeof(ErrorInfo_struct);

	c->ctlr = h->ctlr;
	return c;
}

static void cmd_free(ctlr_info_t *h, CommandList_struct *c)
{
	int i;

	i = c - h->cmd_pool;
	clear_bit(i, h->cmd_pool_bits);
	h->nr_frees++;
}

static void cmd_special_free(ctlr_info_t *h, CommandList_struct *c)
{
	u64bit temp64;

	temp64.val32.lower = c->ErrDesc.Addr.lower;
	temp64.val32.upper = c->ErrDesc.Addr.upper;
	pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct),
			    c->err_info, (dma_addr_t) temp64.val);
	pci_free_consistent(h->pdev, sizeof(CommandList_struct), c,
		(dma_addr_t) cciss_tag_discard_error_bits(h, (u32) c->busaddr));
}

static inline ctlr_info_t *get_host(struct gendisk *disk)
{
	return disk->queue->queuedata;
}

static inline drive_info_struct *get_drv(struct gendisk *disk)
{
	return disk->private_data;
}

/*
 * Open. Make sure the device is really there.
 */
static int cciss_open(struct block_device *bdev, fmode_t mode)
{
	ctlr_info_t *h = get_host(bdev->bd_disk);
	drive_info_struct *drv = get_drv(bdev->bd_disk);

	dev_dbg(&h->pdev->dev, "cciss_open %s\n", bdev->bd_disk->disk_name);
	if (drv->busy_configuring)
		return -EBUSY;
	/*
	 * Root is allowed to open raw volume zero even if it's not configured
	 * so array config can still work. Root is also allowed to open any
	 * volume that has a LUN ID, so it can issue IOCTL to reread the
	 * disk information. I don't think I really like this,
	 * but I'm already using way too many device nodes to claim another one
	 * for "raw controller".
	 */
	if (drv->heads == 0) {
		if (MINOR(bdev->bd_dev) != 0) {	/* not node 0? */
			/* if not node 0 make sure it is a partition = 0 */
			if (MINOR(bdev->bd_dev) & 0x0f) {
				return -ENXIO;
				/* if it is, make sure we have a LUN ID */
			} else if (memcmp(drv->LunID, CTLR_LUNID,
				sizeof(drv->LunID))) {
				return -ENXIO;
			}
		}
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
	}
	drv->usage_count++;
	h->usage_count++;
	return 0;
}

static int cciss_unlocked_open(struct block_device *bdev, fmode_t mode)
{
	int ret;

	mutex_lock(&cciss_mutex);
	ret = cciss_open(bdev, mode);
	mutex_unlock(&cciss_mutex);

	return ret;
}

/*
 * Close. Sync first.
 */
static void cciss_release(struct gendisk *disk, fmode_t mode)
{
	ctlr_info_t *h;
	drive_info_struct *drv;

	mutex_lock(&cciss_mutex);
	h = get_host(disk);
	drv = get_drv(disk);
	dev_dbg(&h->pdev->dev, "cciss_release %s\n", disk->disk_name);
	drv->usage_count--;
	h->usage_count--;
	mutex_unlock(&cciss_mutex);
}

#ifdef CONFIG_COMPAT

static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
				  unsigned cmd, unsigned long arg);
static int cciss_ioctl32_big_passthru(struct block_device *bdev, fmode_t mode,
				      unsigned cmd, unsigned long arg);

static int cciss_compat_ioctl(struct block_device *bdev, fmode_t mode,
			      unsigned cmd, unsigned long arg)
{
	switch (cmd) {
	case CCISS_GETPCIINFO:
	case CCISS_GETINTINFO:
	case CCISS_SETINTINFO:
	case CCISS_GETNODENAME:
	case CCISS_SETNODENAME:
	case CCISS_GETHEARTBEAT:
	case CCISS_GETBUSTYPES:
	case CCISS_GETFIRMVER:
	case CCISS_GETDRIVVER:
	case CCISS_REVALIDVOLS:
	case CCISS_DEREGDISK:
	case CCISS_REGNEWDISK:
	case CCISS_REGNEWD:
	case CCISS_RESCANDISK:
	case CCISS_GETLUNINFO:
		return cciss_ioctl(bdev, mode, cmd, arg);

	case CCISS_PASSTHRU32:
		return cciss_ioctl32_passthru(bdev, mode, cmd, arg);
	case CCISS_BIG_PASSTHRU32:
		return cciss_ioctl32_big_passthru(bdev, mode, cmd, arg);

	default:
		return -ENOIOCTLCMD;
	}
}

static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
				  unsigned cmd, unsigned long arg)
{
	IOCTL32_Command_struct __user *arg32 =
	    (IOCTL32_Command_struct __user *) arg;
	IOCTL_Command_struct arg64;
	IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
	int err;
	u32 cp;

	memset(&arg64, 0, sizeof(arg64));
	err = 0;
	err |=
	    copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
			   sizeof(arg64.LUN_info));
	err |=
	    copy_from_user(&arg64.Request, &arg32->Request,
			   sizeof(arg64.Request));
	err |=
	    copy_from_user(&arg64.error_info, &arg32->error_info,
			   sizeof(arg64.error_info));
	err |= get_user(arg64.buf_size, &arg32->buf_size);
	err |= get_user(cp, &arg32->buf);
	arg64.buf = compat_ptr(cp);
	err |= copy_to_user(p, &arg64, sizeof(arg64));

	if (err)
		return -EFAULT;

	err = cciss_ioctl(bdev, mode, CCISS_PASSTHRU, (unsigned long)p);
	if (err)
		return err;
	err |=
	    copy_in_user(&arg32->error_info, &p->error_info,
			 sizeof(arg32->error_info));
	if (err)
		return -EFAULT;
	return err;
}

static int cciss_ioctl32_big_passthru(struct block_device *bdev, fmode_t mode,
				      unsigned cmd, unsigned long arg)
{
	BIG_IOCTL32_Command_struct __user *arg32 =
	    (BIG_IOCTL32_Command_struct __user *) arg;
	BIG_IOCTL_Command_struct arg64;
	BIG_IOCTL_Command_struct __user *p =
	    compat_alloc_user_space(sizeof(arg64));
	int err;
	u32 cp;

	memset(&arg64, 0, sizeof(arg64));
	err = 0;
	err |=
	    copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
			   sizeof(arg64.LUN_info));
	err |=
	    copy_from_user(&arg64.Request, &arg32->Request,
			   sizeof(arg64.Request));
	err |=
	    copy_from_user(&arg64.error_info, &arg32->error_info,
			   sizeof(arg64.error_info));
	err |= get_user(arg64.buf_size, &arg32->buf_size);
	err |= get_user(arg64.malloc_size, &arg32->malloc_size);
	err |= get_user(cp, &arg32->buf);
	arg64.buf = compat_ptr(cp);
	err |= copy_to_user(p, &arg64, sizeof(arg64));

	if (err)
		return -EFAULT;

	err = cciss_ioctl(bdev, mode, CCISS_BIG_PASSTHRU, (unsigned long)p);
	if (err)
		return err;
	err |=
	    copy_in_user(&arg32->error_info, &p->error_info,
			 sizeof(arg32->error_info));
	if (err)
		return -EFAULT;
	return err;
}
#endif

static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	drive_info_struct *drv = get_drv(bdev->bd_disk);

	if (!drv->cylinders)
		return -ENXIO;

	geo->heads = drv->heads;
	geo->sectors = drv->sectors;
	geo->cylinders = drv->cylinders;
	return 0;
}

static void check_ioctl_unit_attention(ctlr_info_t *h, CommandList_struct *c)
{
	if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
		c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
		(void)check_for_unit_attention(h, c);
}

static int cciss_getpciinfo(ctlr_info_t *h, void __user *argp)
{
	cciss_pci_info_struct pciinfo;

	if (!argp)
		return -EINVAL;
	pciinfo.domain = pci_domain_nr(h->pdev->bus);
	pciinfo.bus = h->pdev->bus->number;
	pciinfo.dev_fn = h->pdev->devfn;
	pciinfo.board_id = h->board_id;
	if (copy_to_user(argp, &pciinfo, sizeof(cciss_pci_info_struct)))
		return -EFAULT;
	return 0;
}

static int cciss_getintinfo(ctlr_info_t *h, void __user *argp)
{
	cciss_coalint_struct intinfo;
	unsigned long flags;

	if (!argp)
		return -EINVAL;
	spin_lock_irqsave(&h->lock, flags);
	intinfo.delay = readl(&h->cfgtable->HostWrite.CoalIntDelay);
	intinfo.count = readl(&h->cfgtable->HostWrite.CoalIntCount);
	spin_unlock_irqrestore(&h->lock, flags);
	if (copy_to_user
	    (argp, &intinfo, sizeof(cciss_coalint_struct)))
		return -EFAULT;
	return 0;
}

static int cciss_setintinfo(ctlr_info_t *h, void __user *argp)
{
	cciss_coalint_struct intinfo;
	unsigned long flags;
	int i;

	if (!argp)
		return -EINVAL;
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (copy_from_user(&intinfo, argp, sizeof(intinfo)))
		return -EFAULT;
	if ((intinfo.delay == 0) && (intinfo.count == 0))
		return -EINVAL;
	spin_lock_irqsave(&h->lock, flags);
	/* Update the field, and then ring the doorbell */
	writel(intinfo.delay, &(h->cfgtable->HostWrite.CoalIntDelay));
	writel(intinfo.count, &(h->cfgtable->HostWrite.CoalIntCount));
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);

	for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
		if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
			break;
		udelay(1000); /* delay and try again */
	}
	spin_unlock_irqrestore(&h->lock, flags);
	if (i >= MAX_IOCTL_CONFIG_WAIT)
		return -EAGAIN;
	return 0;
}

static int cciss_getnodename(ctlr_info_t *h, void __user *argp)
{
	NodeName_type NodeName;
	unsigned long flags;
	int i;

	if (!argp)
		return -EINVAL;
	spin_lock_irqsave(&h->lock, flags);
	for (i = 0; i < 16; i++)
		NodeName[i] = readb(&h->cfgtable->ServerName[i]);
	spin_unlock_irqrestore(&h->lock, flags);
	if (copy_to_user(argp, NodeName, sizeof(NodeName_type)))
		return -EFAULT;
	return 0;
}

static int cciss_setnodename(ctlr_info_t *h, void __user *argp)
{
	NodeName_type NodeName;
	unsigned long flags;
	int i;

	if (!argp)
		return -EINVAL;
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (copy_from_user(NodeName, argp, sizeof(NodeName_type)))
		return -EFAULT;
	spin_lock_irqsave(&h->lock, flags);
	/* Update the field, and then ring the doorbell */
	for (i = 0; i < 16; i++)
		writeb(NodeName[i], &h->cfgtable->ServerName[i]);
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
		if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
			break;
		udelay(1000); /* delay and try again */
	}
	spin_unlock_irqrestore(&h->lock, flags);
	if (i >= MAX_IOCTL_CONFIG_WAIT)
		return -EAGAIN;
	return 0;
}

static int cciss_getheartbeat(ctlr_info_t *h, void __user *argp)
{
	Heartbeat_type heartbeat;
	unsigned long flags;

	if (!argp)
		return -EINVAL;
	spin_lock_irqsave(&h->lock, flags);
	heartbeat = readl(&h->cfgtable->HeartBeat);
	spin_unlock_irqrestore(&h->lock, flags);
	if (copy_to_user(argp, &heartbeat, sizeof(Heartbeat_type)))
		return -EFAULT;
	return 0;
}

static int cciss_getbustypes(ctlr_info_t *h, void __user *argp)
{
	BusTypes_type BusTypes;
	unsigned long flags;

	if (!argp)
		return -EINVAL;
	spin_lock_irqsave(&h->lock, flags);
	BusTypes = readl(&h->cfgtable->BusTypes);
	spin_unlock_irqrestore(&h->lock, flags);
	if (copy_to_user(argp, &BusTypes, sizeof(BusTypes_type)))
		return -EFAULT;
	return 0;
}

static int cciss_getfirmver(ctlr_info_t *h, void __user *argp)
{
	FirmwareVer_type firmware;

	if (!argp)
		return -EINVAL;
	memcpy(firmware, h->firm_ver, 4);

	if (copy_to_user
	    (argp, firmware, sizeof(FirmwareVer_type)))
		return -EFAULT;
	return 0;
}

static int cciss_getdrivver(ctlr_info_t *h, void __user *argp)
{
	DriverVer_type DriverVer = DRIVER_VERSION;

	if (!argp)
		return -EINVAL;
	if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
		return -EFAULT;
	return 0;
}

static int cciss_getluninfo(ctlr_info_t *h,
	struct gendisk *disk, void __user *argp)
{
	LogvolInfo_struct luninfo;
	drive_info_struct *drv = get_drv(disk);

	if (!argp)
		return -EINVAL;
	memcpy(&luninfo.LunID, drv->LunID, sizeof(luninfo.LunID));
	luninfo.num_opens = drv->usage_count;
	luninfo.num_parts = 0;
	if (copy_to_user(argp, &luninfo, sizeof(LogvolInfo_struct)))
		return -EFAULT;
	return 0;
}

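/*
 * The two passthru handlers below let privileged userspace submit raw CISS
 * commands. As a rough, untested sketch of how an application might drive
 * CCISS_PASSTHRU (assuming <linux/cciss_ioctl.h> and a plain SCSI INQUIRY;
 * the device path is illustrative):
 *
 *	unsigned char inq[96];
 *	IOCTL_Command_struct ic;
 *	int fd = open("/dev/cciss/c0d0", O_RDWR);
 *
 *	memset(&ic, 0, sizeof(ic));	// zeroed LUN_info addresses the controller
 *	ic.Request.CDBLen = 6;
 *	ic.Request.CDB[0] = 0x12;	// INQUIRY
 *	ic.Request.CDB[4] = sizeof(inq);
 *	ic.Request.Type.Type = TYPE_CMD;
 *	ic.Request.Type.Attribute = ATTR_SIMPLE;
 *	ic.Request.Type.Direction = XFER_READ;
 *	ic.buf_size = sizeof(inq);
 *	ic.buf = inq;
 *	if (ioctl(fd, CCISS_PASSTHRU, &ic) == 0)
 *		;	// inq[] holds the data; check ic.error_info for status
 *
 * cciss_passthru() maps the single user buffer for DMA and issues the
 * command; cciss_bigpassthru() splits larger buffers across several
 * scatter-gather segments.
 */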
static int cciss_passthru(ctlr_info_t *h, void __user *argp)
{
	IOCTL_Command_struct iocommand;
	CommandList_struct *c;
	char *buff = NULL;
	u64bit temp64;
	DECLARE_COMPLETION_ONSTACK(wait);

	if (!argp)
		return -EINVAL;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	if (copy_from_user
	    (&iocommand, argp, sizeof(IOCTL_Command_struct)))
		return -EFAULT;
	if ((iocommand.buf_size < 1) &&
	    (iocommand.Request.Type.Direction != XFER_NONE)) {
		return -EINVAL;
	}
	if (iocommand.buf_size > 0) {
		buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
		if (buff == NULL)
			return -ENOMEM;
	}
	if (iocommand.Request.Type.Direction == XFER_WRITE) {
		/* Copy the data into the buffer we created */
		if (copy_from_user(buff, iocommand.buf, iocommand.buf_size)) {
			kfree(buff);
			return -EFAULT;
		}
	} else {
		memset(buff, 0, iocommand.buf_size);
	}
	c = cmd_special_alloc(h);
	if (!c) {
		kfree(buff);
		return -ENOMEM;
	}
	/* Fill in the command type */
	c->cmd_type = CMD_IOCTL_PEND;
	/* Fill in Command Header */
	c->Header.ReplyQueue = 0;	/* unused in simple mode */
	if (iocommand.buf_size > 0) {	/* buffer to fill */
		c->Header.SGList = 1;
		c->Header.SGTotal = 1;
	} else {	/* no buffers to fill */
		c->Header.SGList = 0;
		c->Header.SGTotal = 0;
	}
	c->Header.LUN = iocommand.LUN_info;
	/* use the bus address of the cmd block for the tag */
	c->Header.Tag.lower = c->busaddr;

	/* Fill in Request block */
	c->Request = iocommand.Request;

	/* Fill in the scatter gather information */
	if (iocommand.buf_size > 0) {
		temp64.val = pci_map_single(h->pdev, buff,
			iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
		c->SG[0].Addr.lower = temp64.val32.lower;
		c->SG[0].Addr.upper = temp64.val32.upper;
		c->SG[0].Len = iocommand.buf_size;
		c->SG[0].Ext = 0;	/* we are not chaining */
	}
	c->waiting = &wait;

	enqueue_cmd_and_start_io(h, c);
	wait_for_completion(&wait);

	/* unlock the buffers from DMA */
	temp64.val32.lower = c->SG[0].Addr.lower;
	temp64.val32.upper = c->SG[0].Addr.upper;
	pci_unmap_single(h->pdev, (dma_addr_t) temp64.val, iocommand.buf_size,
			 PCI_DMA_BIDIRECTIONAL);
	check_ioctl_unit_attention(h, c);

	/* Copy the error information out */
	iocommand.error_info = *(c->err_info);
	if (copy_to_user(argp, &iocommand, sizeof(IOCTL_Command_struct))) {
		kfree(buff);
		cmd_special_free(h, c);
		return -EFAULT;
	}

	if (iocommand.Request.Type.Direction == XFER_READ) {
		/* Copy the data out of the buffer we created */
		if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
			kfree(buff);
			cmd_special_free(h, c);
			return -EFAULT;
		}
	}
	kfree(buff);
	cmd_special_free(h, c);
	return 0;
}

static int cciss_bigpassthru(ctlr_info_t *h, void __user *argp)
{
	BIG_IOCTL_Command_struct *ioc;
	CommandList_struct *c;
	unsigned char **buff = NULL;
	int *buff_size = NULL;
	u64bit temp64;
	BYTE sg_used = 0;
	int status = 0;
	int i;
	DECLARE_COMPLETION_ONSTACK(wait);
	__u32 left;
	__u32 sz;
	BYTE __user *data_ptr;

	if (!argp)
		return -EINVAL;
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;
	ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
	if (!ioc) {
		status = -ENOMEM;
		goto cleanup1;
	}
	if (copy_from_user(ioc, argp, sizeof(*ioc))) {
		status = -EFAULT;
		goto cleanup1;
	}
	if ((ioc->buf_size < 1) &&
	    (ioc->Request.Type.Direction != XFER_NONE)) {
		status = -EINVAL;
		goto cleanup1;
	}
	/* Check kmalloc limits using all SGs */
	if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
		status = -EINVAL;
		goto cleanup1;
	}
	if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) {
		status = -EINVAL;
		goto cleanup1;
	}
	buff = kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL);
	if (!buff) {
		status = -ENOMEM;
		goto cleanup1;
	}
	buff_size = kmalloc(MAXSGENTRIES * sizeof(int), GFP_KERNEL);
	if (!buff_size) {
		status = -ENOMEM;
		goto cleanup1;
	}
	left = ioc->buf_size;
	data_ptr = ioc->buf;
	while (left) {
		sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
		buff_size[sg_used] = sz;
		buff[sg_used] = kmalloc(sz, GFP_KERNEL);
		if (buff[sg_used] == NULL) {
			status = -ENOMEM;
			goto cleanup1;
		}
		if (ioc->Request.Type.Direction == XFER_WRITE) {
			if (copy_from_user(buff[sg_used], data_ptr, sz)) {
				status = -EFAULT;
				goto cleanup1;
			}
		} else {
			memset(buff[sg_used], 0, sz);
		}
		left -= sz;
		data_ptr += sz;
		sg_used++;
	}
	c = cmd_special_alloc(h);
	if (!c) {
		status = -ENOMEM;
		goto cleanup1;
	}
	c->cmd_type = CMD_IOCTL_PEND;
	c->Header.ReplyQueue = 0;
	c->Header.SGList = sg_used;
	c->Header.SGTotal = sg_used;
	c->Header.LUN = ioc->LUN_info;
	c->Header.Tag.lower = c->busaddr;

	c->Request = ioc->Request;
	for (i = 0; i < sg_used; i++) {
		temp64.val = pci_map_single(h->pdev, buff[i], buff_size[i],
					    PCI_DMA_BIDIRECTIONAL);
		c->SG[i].Addr.lower = temp64.val32.lower;
		c->SG[i].Addr.upper = temp64.val32.upper;
		c->SG[i].Len = buff_size[i];
		c->SG[i].Ext = 0;	/* we are not chaining */
	}
	c->waiting = &wait;
	enqueue_cmd_and_start_io(h, c);
	wait_for_completion(&wait);
	/* unlock the buffers from DMA */
	for (i = 0; i < sg_used; i++) {
		temp64.val32.lower = c->SG[i].Addr.lower;
		temp64.val32.upper = c->SG[i].Addr.upper;
		pci_unmap_single(h->pdev,
			(dma_addr_t) temp64.val, buff_size[i],
			PCI_DMA_BIDIRECTIONAL);
	}
	check_ioctl_unit_attention(h, c);
	/* Copy the error information out */
	ioc->error_info = *(c->err_info);
	if (copy_to_user(argp, ioc, sizeof(*ioc))) {
		cmd_special_free(h, c);
		status = -EFAULT;
		goto cleanup1;
	}
	if (ioc->Request.Type.Direction == XFER_READ) {
		/* Copy the data out of the buffer we created */
		BYTE __user *ptr = ioc->buf;
		for (i = 0; i < sg_used; i++) {
			if (copy_to_user(ptr, buff[i], buff_size[i])) {
				cmd_special_free(h, c);
				status = -EFAULT;
				goto cleanup1;
			}
			ptr += buff_size[i];
		}
	}
	cmd_special_free(h, c);
	status = 0;
cleanup1:
	if (buff) {
		for (i = 0; i < sg_used; i++)
			kfree(buff[i]);
		kfree(buff);
	}
	kfree(buff_size);
	kfree(ioc);
	return status;
}

static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
	unsigned int cmd, unsigned long arg)
{
	struct gendisk *disk = bdev->bd_disk;
	ctlr_info_t *h = get_host(disk);
	void __user *argp = (void __user *)arg;

	dev_dbg(&h->pdev->dev, "cciss_ioctl: Called with cmd=%x %lx\n",
		cmd, arg);
	switch (cmd) {
	case CCISS_GETPCIINFO:
		return cciss_getpciinfo(h, argp);
	case CCISS_GETINTINFO:
		return cciss_getintinfo(h, argp);
	case CCISS_SETINTINFO:
		return cciss_setintinfo(h, argp);
	case CCISS_GETNODENAME:
		return cciss_getnodename(h, argp);
	case CCISS_SETNODENAME:
		return cciss_setnodename(h, argp);
	case CCISS_GETHEARTBEAT:
		return cciss_getheartbeat(h, argp);
	case CCISS_GETBUSTYPES:
		return cciss_getbustypes(h, argp);
	case CCISS_GETFIRMVER:
		return cciss_getfirmver(h, argp);
	case CCISS_GETDRIVVER:
		return cciss_getdrivver(h, argp);
	case CCISS_DEREGDISK:
	case CCISS_REGNEWD:
	case CCISS_REVALIDVOLS:
		return rebuild_lun_table(h, 0, 1);
	case CCISS_GETLUNINFO:
		return cciss_getluninfo(h, disk, argp);
	case CCISS_PASSTHRU:
		return cciss_passthru(h, argp);
	case CCISS_BIG_PASSTHRU:
		return cciss_bigpassthru(h, argp);

	/* scsi_cmd_blk_ioctl handles these, below, though some are not */
	/* very meaningful for cciss.  SG_IO is the main one people want. */

	case SG_GET_VERSION_NUM:
	case SG_SET_TIMEOUT:
	case SG_GET_TIMEOUT:
	case SG_GET_RESERVED_SIZE:
	case SG_SET_RESERVED_SIZE:
	case SG_EMULATED_HOST:
	case SG_IO:
	case SCSI_IOCTL_SEND_COMMAND:
		return scsi_cmd_blk_ioctl(bdev, mode, cmd, argp);

	/* scsi_cmd_blk_ioctl would normally handle these, below, but */
	/* they aren't a good fit for cciss, as CD-ROMs are */
	/* not supported, and we don't have any bus/target/lun */
	/* which we present to the kernel. */

	case CDROM_SEND_PACKET:
	case CDROMCLOSETRAY:
	case CDROMEJECT:
	case SCSI_IOCTL_GET_IDLUN:
	case SCSI_IOCTL_GET_BUS_NUMBER:
	default:
		return -ENOTTY;
	}
}

static void cciss_check_queues(ctlr_info_t *h)
{
	int start_queue = h->next_to_run;
	int i;

	/* check to see if we have maxed out the number of commands that can
	 * be placed on the queue.  If so then exit.  We do this check here
	 * in case the interrupt we serviced was from an ioctl and did not
	 * free any new commands.
	 */
	if ((find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds)) == h->nr_cmds)
		return;

	/* We have room on the queue for more commands.  Now we need to queue
	 * them up.  We will also keep track of the next queue to run so
	 * that every queue gets a chance to be started first.
	 */
	for (i = 0; i < h->highest_lun + 1; i++) {
		int curr_queue = (start_queue + i) % (h->highest_lun + 1);
		/* make sure the disk has been added and the drive is real
		 * because this can be called from the middle of init_one.
		 */
		if (!h->drv[curr_queue])
			continue;
		if (!(h->drv[curr_queue]->queue) ||
			!(h->drv[curr_queue]->heads))
			continue;
		blk_start_queue(h->gendisk[curr_queue]->queue);

		/* check to see if we have maxed out the number of commands
		 * that can be placed on the queue.
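		 * If we did, remember where to pick up next time so the
		 * round-robin over logical drives stays fair.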
		 */
		if ((find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds)) == h->nr_cmds) {
			if (curr_queue == start_queue) {
				h->next_to_run =
				    (start_queue + 1) % (h->highest_lun + 1);
				break;
			} else {
				h->next_to_run = curr_queue;
				break;
			}
		}
	}
}

static void cciss_softirq_done(struct request *rq)
{
	CommandList_struct *c = rq->completion_data;
	ctlr_info_t *h = hba[c->ctlr];
	SGDescriptor_struct *curr_sg = c->SG;
	u64bit temp64;
	unsigned long flags;
	int i, ddir;
	int sg_index = 0;

	if (c->Request.Type.Direction == XFER_READ)
		ddir = PCI_DMA_FROMDEVICE;
	else
		ddir = PCI_DMA_TODEVICE;

	/* command did not need to be retried */
	/* unmap the DMA mapping for all the scatter gather elements */
	for (i = 0; i < c->Header.SGList; i++) {
		if (curr_sg[sg_index].Ext == CCISS_SG_CHAIN) {
			cciss_unmap_sg_chain_block(h, c);
			/* Point to the next block */
			curr_sg = h->cmd_sg_list[c->cmdindex];
			sg_index = 0;
		}
		temp64.val32.lower = curr_sg[sg_index].Addr.lower;
		temp64.val32.upper = curr_sg[sg_index].Addr.upper;
		pci_unmap_page(h->pdev, temp64.val, curr_sg[sg_index].Len,
				ddir);
		++sg_index;
	}

	dev_dbg(&h->pdev->dev, "Done with %p\n", rq);

	/* set the residual count for pc requests */
	if (rq->cmd_type == REQ_TYPE_BLOCK_PC)
		rq->resid_len = c->err_info->ResidualCnt;

	blk_end_request_all(rq, (rq->errors == 0) ? 0 : -EIO);

	spin_lock_irqsave(&h->lock, flags);
	cmd_free(h, c);
	cciss_check_queues(h);
	spin_unlock_irqrestore(&h->lock, flags);
}

static inline void log_unit_to_scsi3addr(ctlr_info_t *h,
	unsigned char scsi3addr[], uint32_t log_unit)
{
	memcpy(scsi3addr, h->drv[log_unit]->LunID,
		sizeof(h->drv[log_unit]->LunID));
}

/* This function gets the SCSI vendor, model, and revision of a logical drive
 * via the inquiry page 0.  Model, vendor, and rev are set to empty strings if
 * they cannot be read.
 */
static void cciss_get_device_descr(ctlr_info_t *h, int logvol,
	char *vendor, char *model, char *rev)
{
	int rc;
	InquiryData_struct *inq_buf;
	unsigned char scsi3addr[8];

	*vendor = '\0';
	*model = '\0';
	*rev = '\0';

	inq_buf = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
	if (!inq_buf)
		return;

	log_unit_to_scsi3addr(h, scsi3addr, logvol);
	rc = sendcmd_withirq(h, CISS_INQUIRY, inq_buf, sizeof(*inq_buf), 0,
			scsi3addr, TYPE_CMD);
	if (rc == IO_OK) {
		memcpy(vendor, &inq_buf->data_byte[8], VENDOR_LEN);
		vendor[VENDOR_LEN] = '\0';
		memcpy(model, &inq_buf->data_byte[16], MODEL_LEN);
		model[MODEL_LEN] = '\0';
		memcpy(rev, &inq_buf->data_byte[32], REV_LEN);
		rev[REV_LEN] = '\0';
	}

	kfree(inq_buf);
}

/* This function gets the serial number of a logical drive via
 * inquiry page 0x83.  Serial no. is 16 bytes.  If the serial
 * number cannot be had, for whatever reason, 16 bytes of 0xff
 * are returned instead.
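 * (The 16 bytes are taken from offset 8 of the 64-byte page 0x83
 * response; see the memcpy() below.)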
1903 */ 1904 static void cciss_get_serial_no(ctlr_info_t *h, int logvol, 1905 unsigned char *serial_no, int buflen) 1906 { 1907 #define PAGE_83_INQ_BYTES 64 1908 int rc; 1909 unsigned char *buf; 1910 unsigned char scsi3addr[8]; 1911 1912 if (buflen > 16) 1913 buflen = 16; 1914 memset(serial_no, 0xff, buflen); 1915 buf = kzalloc(PAGE_83_INQ_BYTES, GFP_KERNEL); 1916 if (!buf) 1917 return; 1919 log_unit_to_scsi3addr(h, scsi3addr, logvol); 1920 rc = sendcmd_withirq(h, CISS_INQUIRY, buf, 1921 PAGE_83_INQ_BYTES, 0x83, scsi3addr, TYPE_CMD); 1922 if (rc == IO_OK) 1923 memcpy(serial_no, &buf[8], buflen); 1924 kfree(buf); 1925 return; 1926 } 1927 1928 /* 1929 * cciss_add_disk sets up the block device queue for a logical drive 1930 */ 1931 static int cciss_add_disk(ctlr_info_t *h, struct gendisk *disk, 1932 int drv_index) 1933 { 1934 disk->queue = blk_init_queue(do_cciss_request, &h->lock); 1935 if (!disk->queue) 1936 goto init_queue_failure; 1937 sprintf(disk->disk_name, "cciss/c%dd%d", h->ctlr, drv_index); 1938 disk->major = h->major; 1939 disk->first_minor = drv_index << NWD_SHIFT; 1940 disk->fops = &cciss_fops; 1941 if (cciss_create_ld_sysfs_entry(h, drv_index)) 1942 goto cleanup_queue; 1943 disk->private_data = h->drv[drv_index]; 1944 disk->driverfs_dev = &h->drv[drv_index]->dev; 1945 1946 /* Set up queue information */ 1947 blk_queue_bounce_limit(disk->queue, h->pdev->dma_mask); 1948 1949 /* This is a hardware imposed limit. */ 1950 blk_queue_max_segments(disk->queue, h->maxsgentries); 1951 1952 blk_queue_max_hw_sectors(disk->queue, h->cciss_max_sectors); 1953 1954 blk_queue_softirq_done(disk->queue, cciss_softirq_done); 1955 1956 disk->queue->queuedata = h; 1957 1958 blk_queue_logical_block_size(disk->queue, 1959 h->drv[drv_index]->block_size); 1960 1961 /* Make sure all queue data is written out before */ 1962 /* setting h->drv[drv_index]->queue, as setting this */ 1963 /* allows the interrupt handler to start the queue */ 1964 wmb(); 1965 h->drv[drv_index]->queue = disk->queue; 1966 add_disk(disk); 1967 return 0; 1968 1969 cleanup_queue: 1970 blk_cleanup_queue(disk->queue); 1971 disk->queue = NULL; 1972 init_queue_failure: 1973 return -1; 1974 } 1975 1976 /* This function will check the usage_count of the drive to be updated/added. 1977 * If the usage_count is zero and it is a heretofore unknown drive, or, 1978 * the drive's capacity, geometry, or serial number has changed, 1979 * then the drive information will be updated and the disk will be 1980 * re-registered with the kernel. If these conditions don't hold, 1981 * then it will be left alone for the next reboot. The exception to this 1982 * is disk 0 which will always be left registered with the kernel since it 1983 * is also the controller node. Any changes to disk 0 will show up on 1984 * the next reboot.
1985 */ 1986 static void cciss_update_drive_info(ctlr_info_t *h, int drv_index, 1987 int first_time, int via_ioctl) 1988 { 1989 struct gendisk *disk; 1990 InquiryData_struct *inq_buff = NULL; 1991 unsigned int block_size; 1992 sector_t total_size; 1993 unsigned long flags = 0; 1994 int ret = 0; 1995 drive_info_struct *drvinfo; 1996 1997 /* Get information about the disk and modify the driver structure */ 1998 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL); 1999 drvinfo = kzalloc(sizeof(*drvinfo), GFP_KERNEL); 2000 if (inq_buff == NULL || drvinfo == NULL) 2001 goto mem_msg; 2002 2003 /* testing to see if 16-byte CDBs are already being used */ 2004 if (h->cciss_read == CCISS_READ_16) { 2005 cciss_read_capacity_16(h, drv_index, 2006 &total_size, &block_size); 2007 2008 } else { 2009 cciss_read_capacity(h, drv_index, &total_size, &block_size); 2010 /* if read_capacity returns all F's this volume is >2TB */ 2011 /* in size so we switch to 16-byte CDB's for all */ 2012 /* read/write ops */ 2013 if (total_size == 0xFFFFFFFFULL) { 2014 cciss_read_capacity_16(h, drv_index, 2015 &total_size, &block_size); 2016 h->cciss_read = CCISS_READ_16; 2017 h->cciss_write = CCISS_WRITE_16; 2018 } else { 2019 h->cciss_read = CCISS_READ_10; 2020 h->cciss_write = CCISS_WRITE_10; 2021 } 2022 } 2023 2024 cciss_geometry_inquiry(h, drv_index, total_size, block_size, 2025 inq_buff, drvinfo); 2026 drvinfo->block_size = block_size; 2027 drvinfo->nr_blocks = total_size + 1; 2028 2029 cciss_get_device_descr(h, drv_index, drvinfo->vendor, 2030 drvinfo->model, drvinfo->rev); 2031 cciss_get_serial_no(h, drv_index, drvinfo->serial_no, 2032 sizeof(drvinfo->serial_no)); 2033 /* Save the lunid in case we deregister the disk, below. */ 2034 memcpy(drvinfo->LunID, h->drv[drv_index]->LunID, 2035 sizeof(drvinfo->LunID)); 2036 2037 /* Is it the same disk we already know, and nothing's changed? */ 2038 if (h->drv[drv_index]->raid_level != -1 && 2039 ((memcmp(drvinfo->serial_no, 2040 h->drv[drv_index]->serial_no, 16) == 0) && 2041 drvinfo->block_size == h->drv[drv_index]->block_size && 2042 drvinfo->nr_blocks == h->drv[drv_index]->nr_blocks && 2043 drvinfo->heads == h->drv[drv_index]->heads && 2044 drvinfo->sectors == h->drv[drv_index]->sectors && 2045 drvinfo->cylinders == h->drv[drv_index]->cylinders)) 2046 /* The disk is unchanged, nothing to update */ 2047 goto freeret; 2048 2049 /* If we get here it's not the same disk, or something's changed, 2050 * so we need to deregister it, and re-register it, if it's not 2051 * in use. 2052 * If the disk already exists then deregister it before proceeding 2053 * (unless it's the first disk, for the controller node). 2054 */ 2055 if (h->drv[drv_index]->raid_level != -1 && drv_index != 0) { 2056 dev_warn(&h->pdev->dev, "disk %d has changed.\n", drv_index); 2057 spin_lock_irqsave(&h->lock, flags); 2058 h->drv[drv_index]->busy_configuring = 1; 2059 spin_unlock_irqrestore(&h->lock, flags); 2060 2061 /* deregister_disk sets h->drv[drv_index]->queue = NULL 2062 * which keeps the interrupt handler from starting 2063 * the queue. 2064 */ 2065 ret = deregister_disk(h, drv_index, 0, via_ioctl); 2066 } 2067 2068 /* If the disk is in use return */ 2069 if (ret) 2070 goto freeret; 2071 2072 /* Save the new information from cciss_geometry_inquiry 2073 * and serial number inquiry. If the disk was deregistered 2074 * above, then h->drv[drv_index] will be NULL.
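 * In that case the freshly gathered drvinfo is installed wholesale; otherwise (the cxd0 case) the new values are copied field by field into the existing structure.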
2075 */ 2076 if (h->drv[drv_index] == NULL) { 2077 drvinfo->device_initialized = 0; 2078 h->drv[drv_index] = drvinfo; 2079 drvinfo = NULL; /* so it won't be freed below. */ 2080 } else { 2081 /* special case for cxd0 */ 2082 h->drv[drv_index]->block_size = drvinfo->block_size; 2083 h->drv[drv_index]->nr_blocks = drvinfo->nr_blocks; 2084 h->drv[drv_index]->heads = drvinfo->heads; 2085 h->drv[drv_index]->sectors = drvinfo->sectors; 2086 h->drv[drv_index]->cylinders = drvinfo->cylinders; 2087 h->drv[drv_index]->raid_level = drvinfo->raid_level; 2088 memcpy(h->drv[drv_index]->serial_no, drvinfo->serial_no, 16); 2089 memcpy(h->drv[drv_index]->vendor, drvinfo->vendor, 2090 VENDOR_LEN + 1); 2091 memcpy(h->drv[drv_index]->model, drvinfo->model, MODEL_LEN + 1); 2092 memcpy(h->drv[drv_index]->rev, drvinfo->rev, REV_LEN + 1); 2093 } 2094 2095 ++h->num_luns; 2096 disk = h->gendisk[drv_index]; 2097 set_capacity(disk, h->drv[drv_index]->nr_blocks); 2098 2099 /* If it's not disk 0 (drv_index != 0) 2100 * or if it was disk 0, but there was previously 2101 * no actual corresponding configured logical drive 2102 * (raid_level == -1) then we want to update the 2103 * logical drive's information. 2104 */ 2105 if (drv_index || first_time) { 2106 if (cciss_add_disk(h, disk, drv_index) != 0) { 2107 cciss_free_gendisk(h, drv_index); 2108 cciss_free_drive_info(h, drv_index); 2109 dev_warn(&h->pdev->dev, "could not update disk %d\n", 2110 drv_index); 2111 --h->num_luns; 2112 } 2113 } 2114 2115 freeret: 2116 kfree(inq_buff); 2117 kfree(drvinfo); 2118 return; 2119 mem_msg: 2120 dev_err(&h->pdev->dev, "out of memory\n"); 2121 goto freeret; 2122 } 2123 2124 /* This function will find the first index of the controller's drive array 2125 * that has a null drv pointer and allocate the drive info struct and 2126 * will return that index. This is where new drives will be added. 2127 * If the index to be returned is greater than the highest_lun index for 2128 * the controller then highest_lun is set to this new index. 2129 * If there are no available indexes or if the allocation fails, then -1 2130 * is returned. "controller_node" is used to know if this is a real 2131 * logical drive, or just the controller node, which determines if this 2132 * counts towards highest_lun. 2133 */ 2134 static int cciss_alloc_drive_info(ctlr_info_t *h, int controller_node) 2135 { 2136 int i; 2137 drive_info_struct *drv; 2138 2139 /* Search for an empty slot for our drive info */ 2140 for (i = 0; i < CISS_MAX_LUN; i++) { 2141 2142 /* if not cxd0 case, and it's occupied, skip it. */ 2143 if (h->drv[i] && i != 0) 2144 continue; 2145 /* 2146 * If it's cxd0 case, and drv is alloc'ed already, and a 2147 * disk is configured there, skip it. 2148 */ 2149 if (i == 0 && h->drv[i] && h->drv[i]->raid_level != -1) 2150 continue; 2151 2152 /* 2153 * We've found an empty slot. Update highest_lun 2154 * provided this isn't just the fake cxd0 controller node. 2155 */ 2156 if (i > h->highest_lun && !controller_node) 2157 h->highest_lun = i; 2158 2159 /* If adding a real disk at cxd0, and it's already alloc'ed */ 2160 if (i == 0 && h->drv[i] != NULL) 2161 return i; 2162 2163 /* 2164 * Found an empty slot, not already alloc'ed. Allocate it. 2165 * Mark it with raid_level == -1, so we know it's new later on.
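 * (cciss_update_drive_info() keys off raid_level != -1 to tell a previously configured drive from a brand-new slot.)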
2166 */ 2167 drv = kzalloc(sizeof(*drv), GFP_KERNEL); 2168 if (!drv) 2169 return -1; 2170 drv->raid_level = -1; /* so we know it's new */ 2171 h->drv[i] = drv; 2172 return i; 2173 } 2174 return -1; 2175 } 2176 2177 static void cciss_free_drive_info(ctlr_info_t *h, int drv_index) 2178 { 2179 kfree(h->drv[drv_index]); 2180 h->drv[drv_index] = NULL; 2181 } 2182 2183 static void cciss_free_gendisk(ctlr_info_t *h, int drv_index) 2184 { 2185 put_disk(h->gendisk[drv_index]); 2186 h->gendisk[drv_index] = NULL; 2187 } 2188 2189 /* cciss_add_gendisk finds a free hba[]->drv structure 2190 * and allocates a gendisk if needed, and sets the lunid 2191 * in the drvinfo structure. It returns the index into 2192 * the ->drv[] array, or -1 if none are free. 2193 * is_controller_node indicates whether highest_lun should 2194 * count this disk, or if it's only being added to provide 2195 * a means to talk to the controller in case no logical 2196 * drives have yet been configured. 2197 */ 2198 static int cciss_add_gendisk(ctlr_info_t *h, unsigned char lunid[], 2199 int controller_node) 2200 { 2201 int drv_index; 2202 2203 drv_index = cciss_alloc_drive_info(h, controller_node); 2204 if (drv_index == -1) 2205 return -1; 2206 2207 /* Check if the gendisk needs to be allocated */ 2208 if (!h->gendisk[drv_index]) { 2209 h->gendisk[drv_index] = 2210 alloc_disk(1 << NWD_SHIFT); 2211 if (!h->gendisk[drv_index]) { 2212 dev_err(&h->pdev->dev, 2213 "could not allocate a new disk %d\n", 2214 drv_index); 2215 goto err_free_drive_info; 2216 } 2217 } 2218 memcpy(h->drv[drv_index]->LunID, lunid, 2219 sizeof(h->drv[drv_index]->LunID)); 2220 if (cciss_create_ld_sysfs_entry(h, drv_index)) 2221 goto err_free_disk; 2222 /* Don't need to mark this busy because nobody */ 2223 /* else knows about this disk yet to contend */ 2224 /* for access to it. */ 2225 h->drv[drv_index]->busy_configuring = 0; 2226 wmb(); 2227 return drv_index; 2228 2229 err_free_disk: 2230 cciss_free_gendisk(h, drv_index); 2231 err_free_drive_info: 2232 cciss_free_drive_info(h, drv_index); 2233 return -1; 2234 } 2235 2236 /* This is for the special case of a controller which 2237 * has no logical drives. In this case, we still need 2238 * to register a disk so the controller can be accessed 2239 * by the Array Config Utility. 2240 */ 2241 static void cciss_add_controller_node(ctlr_info_t *h) 2242 { 2243 struct gendisk *disk; 2244 int drv_index; 2245 2246 if (h->gendisk[0] != NULL) /* already did this? Then bail. */ 2247 return; 2248 2249 drv_index = cciss_add_gendisk(h, CTLR_LUNID, 1); 2250 if (drv_index == -1) 2251 goto error; 2252 h->drv[drv_index]->block_size = 512; 2253 h->drv[drv_index]->nr_blocks = 0; 2254 h->drv[drv_index]->heads = 0; 2255 h->drv[drv_index]->sectors = 0; 2256 h->drv[drv_index]->cylinders = 0; 2257 h->drv[drv_index]->raid_level = -1; 2258 memset(h->drv[drv_index]->serial_no, 0, 16); 2259 disk = h->gendisk[drv_index]; 2260 if (cciss_add_disk(h, disk, drv_index) == 0) 2261 return; 2262 cciss_free_gendisk(h, drv_index); 2263 cciss_free_drive_info(h, drv_index); 2264 error: 2265 dev_warn(&h->pdev->dev, "could not add disk 0.\n"); 2266 return; 2267 } 2268 2269 /* This function will add and remove logical drives from the Logical 2270 * drive array of the controller and maintain persistence of ordering 2271 * so that mount points are preserved until the next reboot. This allows 2272 * for the removal of logical drives in the middle of the drive array 2273 * without a re-ordering of those drives.
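 * A deleted drive simply leaves a NULL hole in h->drv[]; the surviving drives keep their indices, and therefore their device nodes.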
2274 * INPUT 2275 * h = The controller to perform the operations on 2276 */ 2277static int rebuild_lun_table(ctlr_info_t *h, int first_time, 2278 int via_ioctl) 2279{ 2280 int num_luns; 2281 ReportLunData_struct *ld_buff = NULL; 2282 int return_code; 2283 int listlength = 0; 2284 int i; 2285 int drv_found; 2286 int drv_index = 0; 2287 unsigned char lunid[8] = CTLR_LUNID; 2288 unsigned long flags; 2289 2290 if (!capable(CAP_SYS_RAWIO)) 2291 return -EPERM; 2292 2293 /* Set busy_configuring flag for this operation */ 2294 spin_lock_irqsave(&h->lock, flags); 2295 if (h->busy_configuring) { 2296 spin_unlock_irqrestore(&h->lock, flags); 2297 return -EBUSY; 2298 } 2299 h->busy_configuring = 1; 2300 spin_unlock_irqrestore(&h->lock, flags); 2301 2302 ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL); 2303 if (ld_buff == NULL) 2304 goto mem_msg; 2305 2306 return_code = sendcmd_withirq(h, CISS_REPORT_LOG, ld_buff, 2307 sizeof(ReportLunData_struct), 2308 0, CTLR_LUNID, TYPE_CMD); 2309 2310 if (return_code == IO_OK) 2311 listlength = be32_to_cpu(*(__be32 *) ld_buff->LUNListLength); 2312 else { /* reading number of logical volumes failed */ 2313 dev_warn(&h->pdev->dev, 2314 "report logical volume command failed\n"); 2315 listlength = 0; 2316 goto freeret; 2317 } 2318 2319 num_luns = listlength / 8; /* 8 bytes per entry */ 2320 if (num_luns > CISS_MAX_LUN) { 2321 num_luns = CISS_MAX_LUN; 2322 dev_warn(&h->pdev->dev, "more luns configured" 2323 " on controller than can be handled by" 2324 " this driver.\n"); 2325 } 2326 2327 if (num_luns == 0) 2328 cciss_add_controller_node(h); 2329 2330 /* Compare controller drive array to driver's drive array 2331 * to see if any drives are missing on the controller due 2332 * to action of Array Config Utility (user deletes drive) 2333 * and deregister logical drives which have disappeared. 2334 */ 2335 for (i = 0; i <= h->highest_lun; i++) { 2336 int j; 2337 drv_found = 0; 2338 2339 /* skip holes in the array from already deleted drives */ 2340 if (h->drv[i] == NULL) 2341 continue; 2342 2343 for (j = 0; j < num_luns; j++) { 2344 memcpy(lunid, &ld_buff->LUN[j][0], sizeof(lunid)); 2345 if (memcmp(h->drv[i]->LunID, lunid, 2346 sizeof(lunid)) == 0) { 2347 drv_found = 1; 2348 break; 2349 } 2350 } 2351 if (!drv_found) { 2352 /* Deregister it from the OS, it's gone. */ 2353 spin_lock_irqsave(&h->lock, flags); 2354 h->drv[i]->busy_configuring = 1; 2355 spin_unlock_irqrestore(&h->lock, flags); 2356 return_code = deregister_disk(h, i, 1, via_ioctl); 2357 if (h->drv[i] != NULL) 2358 h->drv[i]->busy_configuring = 0; 2359 } 2360 } 2361 2362 /* Compare controller drive array to driver's drive array. 2363 * Check for updates in the drive information and any new drives 2364 * on the controller due to ACU adding logical drives, or changing 2365 * a logical drive's size, etc. Reregister any new/changed drives 2366 */ 2367 for (i = 0; i < num_luns; i++) { 2368 int j; 2369 2370 drv_found = 0; 2371 2372 memcpy(lunid, &ld_buff->LUN[i][0], sizeof(lunid)); 2373 /* Find if the LUN is already in the drive array 2374 * of the driver. If so then update its info 2375 * if not in use. If it does not exist then find 2376 * the first free index and add it. 
2377 */ 2378 for (j = 0; j <= h->highest_lun; j++) { 2379 if (h->drv[j] != NULL && 2380 memcmp(h->drv[j]->LunID, lunid, 2381 sizeof(h->drv[j]->LunID)) == 0) { 2382 drv_index = j; 2383 drv_found = 1; 2384 break; 2385 } 2386 } 2387 2388 /* check if the drive was found already in the array */ 2389 if (!drv_found) { 2390 drv_index = cciss_add_gendisk(h, lunid, 0); 2391 if (drv_index == -1) 2392 goto freeret; 2393 } 2394 cciss_update_drive_info(h, drv_index, first_time, via_ioctl); 2395 } /* end for */ 2396 2397 freeret: 2398 kfree(ld_buff); 2399 h->busy_configuring = 0; 2400 /* We return -1 here to tell the ACU that we have registered/updated 2401 * all of the drives that we can and to keep it from calling us 2402 * additional times. 2403 */ 2404 return -1; 2405 mem_msg: 2406 dev_err(&h->pdev->dev, "out of memory\n"); 2407 h->busy_configuring = 0; 2408 goto freeret; 2409 } 2410 2411 static void cciss_clear_drive_info(drive_info_struct *drive_info) 2412 { 2413 /* zero out the disk size info */ 2414 drive_info->nr_blocks = 0; 2415 drive_info->block_size = 0; 2416 drive_info->heads = 0; 2417 drive_info->sectors = 0; 2418 drive_info->cylinders = 0; 2419 drive_info->raid_level = -1; 2420 memset(drive_info->serial_no, 0, sizeof(drive_info->serial_no)); 2421 memset(drive_info->model, 0, sizeof(drive_info->model)); 2422 memset(drive_info->rev, 0, sizeof(drive_info->rev)); 2423 memset(drive_info->vendor, 0, sizeof(drive_info->vendor)); 2424 /* 2425 * don't clear the LUNID though, we need to remember which 2426 * one this one is. 2427 */ 2428 } 2429 2430 /* This function will deregister the disk and its queue from the 2431 * kernel. It must be called with the controller lock held and the 2432 * drv structure's busy_configuring flag set. Its parameters are: 2433 * 2434 * disk = This is the disk to be deregistered 2435 * drv = This is the drive_info_struct associated with the disk to be 2436 * deregistered. It contains information about the disk used 2437 * by the driver. 2438 * clear_all = This flag determines whether or not the disk information 2439 * is going to be completely cleared out and the highest_lun 2440 * reset. Sometimes we want to clear out information about 2441 * the disk in preparation for re-adding it. In this case 2442 * the highest_lun should be left unchanged and the LunID 2443 * should not be cleared. 2444 * via_ioctl 2445 * This indicates whether we've reached this path via ioctl. 2446 * This affects the maximum usage count allowed for c0d0 to be messed with. 2447 * If this path is reached via ioctl(), then the max_usage_count will 2448 * be 1, as the process calling ioctl() has got to have the device open. 2449 * If we get here via sysfs, then the max usage count will be zero. 2450 */ 2451 static int deregister_disk(ctlr_info_t *h, int drv_index, 2452 int clear_all, int via_ioctl) 2453 { 2454 int i; 2455 struct gendisk *disk; 2456 drive_info_struct *drv; 2457 int recalculate_highest_lun; 2458 2459 if (!capable(CAP_SYS_RAWIO)) 2460 return -EPERM; 2461 2462 drv = h->drv[drv_index]; 2463 disk = h->gendisk[drv_index]; 2464 2465 /* make sure logical volume is NOT in use */ 2466 if (clear_all || (h->gendisk[0] == disk)) { 2467 if (drv->usage_count > via_ioctl) 2468 return -EBUSY; 2469 } else if (drv->usage_count > 0) 2470 return -EBUSY; 2471 2472 recalculate_highest_lun = (drv == h->drv[h->highest_lun]); 2473 2474 /* invalidate the devices and deregister the disk. If it is disk 2475 * zero do not deregister it but just zero out its values.
This 2476 * allows us to delete disk zero but keep the controller registered. 2477 */ 2478 if (h->gendisk[0] != disk) { 2479 struct request_queue *q = disk->queue; 2480 if (disk->flags & GENHD_FL_UP) { 2481 cciss_destroy_ld_sysfs_entry(h, drv_index, 0); 2482 del_gendisk(disk); 2483 } 2484 if (q) 2485 blk_cleanup_queue(q); 2486 /* If clear_all is set then we are deleting the logical 2487 * drive, not just refreshing its info. For drives 2488 * other than disk 0 we will call put_disk. We do not 2489 * do this for disk 0 as we need it to be able to 2490 * configure the controller. 2491 */ 2492 if (clear_all) { 2493 /* This isn't pretty, but we need to find the 2494 * disk in our array and NULL out the pointer. 2495 * This is so that we will call alloc_disk if 2496 * this index is used again later. 2497 */ 2498 for (i = 0; i < CISS_MAX_LUN; i++) { 2499 if (h->gendisk[i] == disk) { 2500 h->gendisk[i] = NULL; 2501 break; 2502 } 2503 } 2504 put_disk(disk); 2505 } 2506 } else { 2507 set_capacity(disk, 0); 2508 cciss_clear_drive_info(drv); 2509 } 2510 2511 --h->num_luns; 2512 2513 /* if it was the last disk, find the new highest lun */ 2514 if (clear_all && recalculate_highest_lun) { 2515 int newhighest = -1; 2516 for (i = 0; i <= h->highest_lun; i++) { 2517 /* if the disk has size > 0, it is available */ 2518 if (h->drv[i] && h->drv[i]->heads) 2519 newhighest = i; 2520 } 2521 h->highest_lun = newhighest; 2522 } 2523 return 0; 2524 } 2525 2526 static int fill_cmd(ctlr_info_t *h, CommandList_struct *c, __u8 cmd, void *buff, 2527 size_t size, __u8 page_code, unsigned char *scsi3addr, 2528 int cmd_type) 2529 { 2530 u64bit buff_dma_handle; 2531 int status = IO_OK; 2532 2533 c->cmd_type = CMD_IOCTL_PEND; 2534 c->Header.ReplyQueue = 0; 2535 if (buff != NULL) { 2536 c->Header.SGList = 1; 2537 c->Header.SGTotal = 1; 2538 } else { 2539 c->Header.SGList = 0; 2540 c->Header.SGTotal = 0; 2541 } 2542 c->Header.Tag.lower = c->busaddr; 2543 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8); 2544 2545 c->Request.Type.Type = cmd_type; 2546 if (cmd_type == TYPE_CMD) { 2547 switch (cmd) { 2548 case CISS_INQUIRY: 2549 /* are we trying to read a vital product page */ 2550 if (page_code != 0) { 2551 c->Request.CDB[1] = 0x01; 2552 c->Request.CDB[2] = page_code; 2553 } 2554 c->Request.CDBLen = 6; 2555 c->Request.Type.Attribute = ATTR_SIMPLE; 2556 c->Request.Type.Direction = XFER_READ; 2557 c->Request.Timeout = 0; 2558 c->Request.CDB[0] = CISS_INQUIRY; 2559 c->Request.CDB[4] = size & 0xFF; 2560 break; 2561 case CISS_REPORT_LOG: 2562 case CISS_REPORT_PHYS: 2563 /* Talking to the controller, so it's a physical command; 2564 mode = 00, target = 0. Nothing to write.
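 * The 4-byte allocation length is encoded big-endian in CDB bytes 6 through 9 below; e.g. a 1024-byte buffer (size == 0x400) yields CDB[6..9] = 00 00 04 00.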
2565 */ 2566 c->Request.CDBLen = 12; 2567 c->Request.Type.Attribute = ATTR_SIMPLE; 2568 c->Request.Type.Direction = XFER_READ; 2569 c->Request.Timeout = 0; 2570 c->Request.CDB[0] = cmd; 2571 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */ 2572 c->Request.CDB[7] = (size >> 16) & 0xFF; 2573 c->Request.CDB[8] = (size >> 8) & 0xFF; 2574 c->Request.CDB[9] = size & 0xFF; 2575 break; 2576 2577 case CCISS_READ_CAPACITY: 2578 c->Request.CDBLen = 10; 2579 c->Request.Type.Attribute = ATTR_SIMPLE; 2580 c->Request.Type.Direction = XFER_READ; 2581 c->Request.Timeout = 0; 2582 c->Request.CDB[0] = cmd; 2583 break; 2584 case CCISS_READ_CAPACITY_16: 2585 c->Request.CDBLen = 16; 2586 c->Request.Type.Attribute = ATTR_SIMPLE; 2587 c->Request.Type.Direction = XFER_READ; 2588 c->Request.Timeout = 0; 2589 c->Request.CDB[0] = cmd; 2590 c->Request.CDB[1] = 0x10; 2591 c->Request.CDB[10] = (size >> 24) & 0xFF; 2592 c->Request.CDB[11] = (size >> 16) & 0xFF; 2593 c->Request.CDB[12] = (size >> 8) & 0xFF; 2594 c->Request.CDB[13] = size & 0xFF; 2597 break; 2598 case CCISS_CACHE_FLUSH: 2599 c->Request.CDBLen = 12; 2600 c->Request.Type.Attribute = ATTR_SIMPLE; 2601 c->Request.Type.Direction = XFER_WRITE; 2602 c->Request.Timeout = 0; 2603 c->Request.CDB[0] = BMIC_WRITE; 2604 c->Request.CDB[6] = BMIC_CACHE_FLUSH; 2605 c->Request.CDB[7] = (size >> 8) & 0xFF; 2606 c->Request.CDB[8] = size & 0xFF; 2607 break; 2608 case TEST_UNIT_READY: 2609 c->Request.CDBLen = 6; 2610 c->Request.Type.Attribute = ATTR_SIMPLE; 2611 c->Request.Type.Direction = XFER_NONE; 2612 c->Request.Timeout = 0; 2613 break; 2614 default: 2615 dev_warn(&h->pdev->dev, "Unknown Command 0x%02x\n", cmd); 2616 return IO_ERROR; 2617 } 2618 } else if (cmd_type == TYPE_MSG) { 2619 switch (cmd) { 2620 case CCISS_ABORT_MSG: 2621 c->Request.CDBLen = 12; 2622 c->Request.Type.Attribute = ATTR_SIMPLE; 2623 c->Request.Type.Direction = XFER_WRITE; 2624 c->Request.Timeout = 0; 2625 c->Request.CDB[0] = cmd; /* abort */ 2626 c->Request.CDB[1] = 0; /* abort a command */ 2627 /* buff contains the tag of the command to abort */ 2628 memcpy(&c->Request.CDB[4], buff, 8); 2629 break; 2630 case CCISS_RESET_MSG: 2631 c->Request.CDBLen = 16; 2632 c->Request.Type.Attribute = ATTR_SIMPLE; 2633 c->Request.Type.Direction = XFER_NONE; 2634 c->Request.Timeout = 0; 2635 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB)); 2636 c->Request.CDB[0] = cmd; /* reset */ 2637 c->Request.CDB[1] = CCISS_RESET_TYPE_TARGET; 2638 break; 2639 case CCISS_NOOP_MSG: 2640 c->Request.CDBLen = 1; 2641 c->Request.Type.Attribute = ATTR_SIMPLE; 2642 c->Request.Type.Direction = XFER_WRITE; 2643 c->Request.Timeout = 0; 2644 c->Request.CDB[0] = cmd; 2645 break; 2646 default: 2647 dev_warn(&h->pdev->dev, 2648 "unknown message type %d\n", cmd); 2649 return IO_ERROR; 2650 } 2651 } else { 2652 dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type); 2653 return IO_ERROR; 2654 } 2655 /* Fill in the scatter gather information */ 2656 if (size > 0) { 2657 buff_dma_handle.val = (__u64) pci_map_single(h->pdev, 2658 buff, size, 2659 PCI_DMA_BIDIRECTIONAL); 2660 c->SG[0].Addr.lower = buff_dma_handle.val32.lower; 2661 c->SG[0].Addr.upper = buff_dma_handle.val32.upper; 2662 c->SG[0].Len = size; 2663 c->SG[0].Ext = 0; /* we are not chaining */ 2664 } 2665 return status; 2666 } 2667 2668 static int cciss_send_reset(ctlr_info_t *h, unsigned char *scsi3addr, 2669 u8 reset_type) 2670 { 2671 CommandList_struct *c; 2672 int return_status; 2673 2674 c = cmd_alloc(h); 2675 if
(!c) 2676 return -ENOMEM; 2677 return_status = fill_cmd(h, c, CCISS_RESET_MSG, NULL, 0, 0, 2678 CTLR_LUNID, TYPE_MSG); 2679 c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */ 2680 if (return_status != IO_OK) { 2681 cmd_special_free(h, c); 2682 return return_status; 2683 } 2684 c->waiting = NULL; 2685 enqueue_cmd_and_start_io(h, c); 2686 /* Don't wait for completion, the reset won't complete. Don't free 2687 * the command either. This is the last command we will send before 2688 * re-initializing everything, so it doesn't matter and won't leak. 2689 */ 2690 return 0; 2691 } 2692 2693 static int check_target_status(ctlr_info_t *h, CommandList_struct *c) 2694 { 2695 switch (c->err_info->ScsiStatus) { 2696 case SAM_STAT_GOOD: 2697 return IO_OK; 2698 case SAM_STAT_CHECK_CONDITION: 2699 switch (0xf & c->err_info->SenseInfo[2]) { 2700 case 0: return IO_OK; /* no sense */ 2701 case 1: return IO_OK; /* recovered error */ 2702 default: 2703 if (check_for_unit_attention(h, c)) 2704 return IO_NEEDS_RETRY; 2705 dev_warn(&h->pdev->dev, "cmd 0x%02x " 2706 "check condition, sense key = 0x%02x\n", 2707 c->Request.CDB[0], c->err_info->SenseInfo[2]); 2708 } 2709 break; 2710 default: 2711 dev_warn(&h->pdev->dev, "cmd 0x%02x " 2712 "scsi status = 0x%02x\n", 2713 c->Request.CDB[0], c->err_info->ScsiStatus); 2714 break; 2715 } 2716 return IO_ERROR; 2717 } 2718 2719 static int process_sendcmd_error(ctlr_info_t *h, CommandList_struct *c) 2720 { 2721 int return_status = IO_OK; 2722 2723 if (c->err_info->CommandStatus == CMD_SUCCESS) 2724 return IO_OK; 2725 2726 switch (c->err_info->CommandStatus) { 2727 case CMD_TARGET_STATUS: 2728 return_status = check_target_status(h, c); 2729 break; 2730 case CMD_DATA_UNDERRUN: 2731 case CMD_DATA_OVERRUN: 2732 /* expected for inquiry and report lun commands */ 2733 break; 2734 case CMD_INVALID: 2735 dev_warn(&h->pdev->dev, "cmd 0x%02x is " 2736 "reported invalid\n", c->Request.CDB[0]); 2737 return_status = IO_ERROR; 2738 break; 2739 case CMD_PROTOCOL_ERR: 2740 dev_warn(&h->pdev->dev, "cmd 0x%02x has " 2741 "protocol error\n", c->Request.CDB[0]); 2742 return_status = IO_ERROR; 2743 break; 2744 case CMD_HARDWARE_ERR: 2745 dev_warn(&h->pdev->dev, "cmd 0x%02x had " 2746 "hardware error\n", c->Request.CDB[0]); 2747 return_status = IO_ERROR; 2748 break; 2749 case CMD_CONNECTION_LOST: 2750 dev_warn(&h->pdev->dev, "cmd 0x%02x had " 2751 "connection lost\n", c->Request.CDB[0]); 2752 return_status = IO_ERROR; 2753 break; 2754 case CMD_ABORTED: 2755 dev_warn(&h->pdev->dev, "cmd 0x%02x was " 2756 "aborted\n", c->Request.CDB[0]); 2757 return_status = IO_ERROR; 2758 break; 2759 case CMD_ABORT_FAILED: 2760 dev_warn(&h->pdev->dev, "cmd 0x%02x reports " 2761 "abort failed\n", c->Request.CDB[0]); 2762 return_status = IO_ERROR; 2763 break; 2764 case CMD_UNSOLICITED_ABORT: 2765 dev_warn(&h->pdev->dev, "unsolicited abort 0x%02x\n", 2766 c->Request.CDB[0]); 2767 return_status = IO_NEEDS_RETRY; 2768 break; 2769 case CMD_UNABORTABLE: 2770 dev_warn(&h->pdev->dev, "cmd unabortable\n"); 2771 return_status = IO_ERROR; 2772 break; 2773 default: 2774 dev_warn(&h->pdev->dev, "cmd 0x%02x returned " 2775 "unknown status %x\n", c->Request.CDB[0], 2776 c->err_info->CommandStatus); 2777 return_status = IO_ERROR; 2778 } 2779 return return_status; 2780 } 2781 2782 static int sendcmd_withirq_core(ctlr_info_t *h, CommandList_struct *c, 2783 int attempt_retry) 2784 { 2785 DECLARE_COMPLETION_ONSTACK(wait); 2786 u64bit buff_dma_handle; 2787 int return_status = IO_OK; 2788 2789 resend_cmd2: 2790 c->waiting =
&wait; 2791 enqueue_cmd_and_start_io(h, c); 2792 2793 wait_for_completion(&wait); 2794 2795 if (c->err_info->CommandStatus == 0 || !attempt_retry) 2796 goto command_done; 2797 2798 return_status = process_sendcmd_error(h, c); 2799 2800 if (return_status == IO_NEEDS_RETRY && 2801 c->retry_count < MAX_CMD_RETRIES) { 2802 dev_warn(&h->pdev->dev, "retrying 0x%02x\n", 2803 c->Request.CDB[0]); 2804 c->retry_count++; 2805 /* erase the old error information */ 2806 memset(c->err_info, 0, sizeof(ErrorInfo_struct)); 2807 return_status = IO_OK; 2808 reinit_completion(&wait); 2809 goto resend_cmd2; 2810 } 2811 2812command_done: 2813 /* unlock the buffers from DMA */ 2814 buff_dma_handle.val32.lower = c->SG[0].Addr.lower; 2815 buff_dma_handle.val32.upper = c->SG[0].Addr.upper; 2816 pci_unmap_single(h->pdev, (dma_addr_t) buff_dma_handle.val, 2817 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL); 2818 return return_status; 2819} 2820 2821static int sendcmd_withirq(ctlr_info_t *h, __u8 cmd, void *buff, size_t size, 2822 __u8 page_code, unsigned char scsi3addr[], 2823 int cmd_type) 2824{ 2825 CommandList_struct *c; 2826 int return_status; 2827 2828 c = cmd_special_alloc(h); 2829 if (!c) 2830 return -ENOMEM; 2831 return_status = fill_cmd(h, c, cmd, buff, size, page_code, 2832 scsi3addr, cmd_type); 2833 if (return_status == IO_OK) 2834 return_status = sendcmd_withirq_core(h, c, 1); 2835 2836 cmd_special_free(h, c); 2837 return return_status; 2838} 2839 2840static void cciss_geometry_inquiry(ctlr_info_t *h, int logvol, 2841 sector_t total_size, 2842 unsigned int block_size, 2843 InquiryData_struct *inq_buff, 2844 drive_info_struct *drv) 2845{ 2846 int return_code; 2847 unsigned long t; 2848 unsigned char scsi3addr[8]; 2849 2850 memset(inq_buff, 0, sizeof(InquiryData_struct)); 2851 log_unit_to_scsi3addr(h, scsi3addr, logvol); 2852 return_code = sendcmd_withirq(h, CISS_INQUIRY, inq_buff, 2853 sizeof(*inq_buff), 0xC1, scsi3addr, TYPE_CMD); 2854 if (return_code == IO_OK) { 2855 if (inq_buff->data_byte[8] == 0xFF) { 2856 dev_warn(&h->pdev->dev, 2857 "reading geometry failed, volume " 2858 "does not support reading geometry\n"); 2859 drv->heads = 255; 2860 drv->sectors = 32; /* Sectors per track */ 2861 drv->cylinders = total_size + 1; 2862 drv->raid_level = RAID_UNKNOWN; 2863 } else { 2864 drv->heads = inq_buff->data_byte[6]; 2865 drv->sectors = inq_buff->data_byte[7]; 2866 drv->cylinders = (inq_buff->data_byte[4] & 0xff) << 8; 2867 drv->cylinders += inq_buff->data_byte[5]; 2868 drv->raid_level = inq_buff->data_byte[8]; 2869 } 2870 drv->block_size = block_size; 2871 drv->nr_blocks = total_size + 1; 2872 t = drv->heads * drv->sectors; 2873 if (t > 1) { 2874 sector_t real_size = total_size + 1; 2875 unsigned long rem = sector_div(real_size, t); 2876 if (rem) 2877 real_size++; 2878 drv->cylinders = real_size; 2879 } 2880 } else { /* Get geometry failed */ 2881 dev_warn(&h->pdev->dev, "reading geometry failed\n"); 2882 } 2883} 2884 2885static void 2886cciss_read_capacity(ctlr_info_t *h, int logvol, sector_t *total_size, 2887 unsigned int *block_size) 2888{ 2889 ReadCapdata_struct *buf; 2890 int return_code; 2891 unsigned char scsi3addr[8]; 2892 2893 buf = kzalloc(sizeof(ReadCapdata_struct), GFP_KERNEL); 2894 if (!buf) { 2895 dev_warn(&h->pdev->dev, "out of memory\n"); 2896 return; 2897 } 2898 2899 log_unit_to_scsi3addr(h, scsi3addr, logvol); 2900 return_code = sendcmd_withirq(h, CCISS_READ_CAPACITY, buf, 2901 sizeof(ReadCapdata_struct), 0, scsi3addr, TYPE_CMD); 2902 if (return_code == IO_OK) { 2903 *total_size = 
be32_to_cpu(*(__be32 *) buf->total_size); 2904 *block_size = be32_to_cpu(*(__be32 *) buf->block_size); 2905 } else { /* read capacity command failed */ 2906 dev_warn(&h->pdev->dev, "read capacity failed\n"); 2907 *total_size = 0; 2908 *block_size = BLOCK_SIZE; 2909 } 2910 kfree(buf); 2911 } 2912 2913 static void cciss_read_capacity_16(ctlr_info_t *h, int logvol, 2914 sector_t *total_size, unsigned int *block_size) 2915 { 2916 ReadCapdata_struct_16 *buf; 2917 int return_code; 2918 unsigned char scsi3addr[8]; 2919 2920 buf = kzalloc(sizeof(ReadCapdata_struct_16), GFP_KERNEL); 2921 if (!buf) { 2922 dev_warn(&h->pdev->dev, "out of memory\n"); 2923 return; 2924 } 2925 2926 log_unit_to_scsi3addr(h, scsi3addr, logvol); 2927 return_code = sendcmd_withirq(h, CCISS_READ_CAPACITY_16, 2928 buf, sizeof(ReadCapdata_struct_16), 2929 0, scsi3addr, TYPE_CMD); 2930 if (return_code == IO_OK) { 2931 *total_size = be64_to_cpu(*(__be64 *) buf->total_size); 2932 *block_size = be32_to_cpu(*(__be32 *) buf->block_size); 2933 } else { /* read capacity command failed */ 2934 dev_warn(&h->pdev->dev, "read capacity failed\n"); 2935 *total_size = 0; 2936 *block_size = BLOCK_SIZE; 2937 } 2938 dev_info(&h->pdev->dev, " blocks= %llu block_size= %d\n", 2939 (unsigned long long)*total_size+1, *block_size); 2940 kfree(buf); 2941 } 2942 2943 static int cciss_revalidate(struct gendisk *disk) 2944 { 2945 ctlr_info_t *h = get_host(disk); 2946 drive_info_struct *drv = get_drv(disk); 2947 int logvol; 2948 int FOUND = 0; 2949 unsigned int block_size; 2950 sector_t total_size; 2951 InquiryData_struct *inq_buff = NULL; 2952 2953 for (logvol = 0; logvol <= h->highest_lun; logvol++) { 2954 if (!h->drv[logvol]) 2955 continue; 2956 if (memcmp(h->drv[logvol]->LunID, drv->LunID, 2957 sizeof(drv->LunID)) == 0) { 2958 FOUND = 1; 2959 break; 2960 } 2961 } 2962 2963 if (!FOUND) 2964 return 1; 2965 2966 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL); 2967 if (inq_buff == NULL) { 2968 dev_warn(&h->pdev->dev, "out of memory\n"); 2969 return 1; 2970 } 2971 if (h->cciss_read == CCISS_READ_10) { 2972 cciss_read_capacity(h, logvol, 2973 &total_size, &block_size); 2974 } else { 2975 cciss_read_capacity_16(h, logvol, 2976 &total_size, &block_size); 2977 } 2978 cciss_geometry_inquiry(h, logvol, total_size, block_size, 2979 inq_buff, drv); 2980 2981 blk_queue_logical_block_size(drv->queue, drv->block_size); 2982 set_capacity(disk, drv->nr_blocks); 2983 2984 kfree(inq_buff); 2985 return 0; 2986 } 2987 2988 /* 2989 * Map (physical) PCI mem into (virtual) kernel space 2990 */ 2991 static void __iomem *remap_pci_mem(ulong base, ulong size) 2992 { 2993 ulong page_base = ((ulong) base) & PAGE_MASK; 2994 ulong page_offs = ((ulong) base) - page_base; 2995 void __iomem *page_remapped = ioremap(page_base, page_offs + size); 2996 2997 return page_remapped ? (page_remapped + page_offs) : NULL; 2998 } 2999 3000 /* 3001 * Takes jobs off the Q and sends them to the hardware, then puts them on 3002 * the completion Q to wait for completion.
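 * Callers are expected to hold h->lock; see resend_cciss_cmd() and do_cciss_request() below.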
3003 */ 3004static void start_io(ctlr_info_t *h) 3005{ 3006 CommandList_struct *c; 3007 3008 while (!list_empty(&h->reqQ)) { 3009 c = list_entry(h->reqQ.next, CommandList_struct, list); 3010 /* can't do anything if fifo is full */ 3011 if ((h->access.fifo_full(h))) { 3012 dev_warn(&h->pdev->dev, "fifo full\n"); 3013 break; 3014 } 3015 3016 /* Get the first entry from the Request Q */ 3017 removeQ(c); 3018 h->Qdepth--; 3019 3020 /* Tell the controller execute command */ 3021 h->access.submit_command(h, c); 3022 3023 /* Put job onto the completed Q */ 3024 addQ(&h->cmpQ, c); 3025 } 3026} 3027 3028/* Assumes that h->lock is held. */ 3029/* Zeros out the error record and then resends the command back */ 3030/* to the controller */ 3031static inline void resend_cciss_cmd(ctlr_info_t *h, CommandList_struct *c) 3032{ 3033 /* erase the old error information */ 3034 memset(c->err_info, 0, sizeof(ErrorInfo_struct)); 3035 3036 /* add it to software queue and then send it to the controller */ 3037 addQ(&h->reqQ, c); 3038 h->Qdepth++; 3039 if (h->Qdepth > h->maxQsinceinit) 3040 h->maxQsinceinit = h->Qdepth; 3041 3042 start_io(h); 3043} 3044 3045static inline unsigned int make_status_bytes(unsigned int scsi_status_byte, 3046 unsigned int msg_byte, unsigned int host_byte, 3047 unsigned int driver_byte) 3048{ 3049 /* inverse of macros in scsi.h */ 3050 return (scsi_status_byte & 0xff) | 3051 ((msg_byte & 0xff) << 8) | 3052 ((host_byte & 0xff) << 16) | 3053 ((driver_byte & 0xff) << 24); 3054} 3055 3056static inline int evaluate_target_status(ctlr_info_t *h, 3057 CommandList_struct *cmd, int *retry_cmd) 3058{ 3059 unsigned char sense_key; 3060 unsigned char status_byte, msg_byte, host_byte, driver_byte; 3061 int error_value; 3062 3063 *retry_cmd = 0; 3064 /* If we get in here, it means we got "target status", that is, scsi status */ 3065 status_byte = cmd->err_info->ScsiStatus; 3066 driver_byte = DRIVER_OK; 3067 msg_byte = cmd->err_info->CommandStatus; /* correct? seems too device specific */ 3068 3069 if (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) 3070 host_byte = DID_PASSTHROUGH; 3071 else 3072 host_byte = DID_OK; 3073 3074 error_value = make_status_bytes(status_byte, msg_byte, 3075 host_byte, driver_byte); 3076 3077 if (cmd->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION) { 3078 if (cmd->rq->cmd_type != REQ_TYPE_BLOCK_PC) 3079 dev_warn(&h->pdev->dev, "cmd %p " 3080 "has SCSI Status 0x%x\n", 3081 cmd, cmd->err_info->ScsiStatus); 3082 return error_value; 3083 } 3084 3085 /* check the sense key */ 3086 sense_key = 0xf & cmd->err_info->SenseInfo[2]; 3087 /* no status or recovered error */ 3088 if (((sense_key == 0x0) || (sense_key == 0x1)) && 3089 (cmd->rq->cmd_type != REQ_TYPE_BLOCK_PC)) 3090 error_value = 0; 3091 3092 if (check_for_unit_attention(h, cmd)) { 3093 *retry_cmd = !(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC); 3094 return 0; 3095 } 3096 3097 /* Not SG_IO or similar? 
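Then report any check condition through the packed status bytes and skip the sense-data copy below.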
*/ 3098 if (cmd->rq->cmd_type != REQ_TYPE_BLOCK_PC) { 3099 if (error_value != 0) 3100 dev_warn(&h->pdev->dev, "cmd %p has CHECK CONDITION" 3101 " sense key = 0x%x\n", cmd, sense_key); 3102 return error_value; 3103 } 3104 3105 /* SG_IO or similar, copy sense data back */ 3106 if (cmd->rq->sense) { 3107 if (cmd->rq->sense_len > cmd->err_info->SenseLen) 3108 cmd->rq->sense_len = cmd->err_info->SenseLen; 3109 memcpy(cmd->rq->sense, cmd->err_info->SenseInfo, 3110 cmd->rq->sense_len); 3111 } else 3112 cmd->rq->sense_len = 0; 3113 3114 return error_value; 3115 } 3116 3117 /* checks the status of the job and calls complete buffers to mark all 3118 * buffers for the completed job. Note that this function does not need 3119 * to hold the hba/queue lock. 3120 */ 3121 static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd, 3122 int timeout) 3123 { 3124 int retry_cmd = 0; 3125 struct request *rq = cmd->rq; 3126 3127 rq->errors = 0; 3128 3129 if (timeout) 3130 rq->errors = make_status_bytes(0, 0, 0, DRIVER_TIMEOUT); 3131 3132 if (cmd->err_info->CommandStatus == 0) /* no error has occurred */ 3133 goto after_error_processing; 3134 3135 switch (cmd->err_info->CommandStatus) { 3136 case CMD_TARGET_STATUS: 3137 rq->errors = evaluate_target_status(h, cmd, &retry_cmd); 3138 break; 3139 case CMD_DATA_UNDERRUN: 3140 if (cmd->rq->cmd_type == REQ_TYPE_FS) { 3141 dev_warn(&h->pdev->dev, "cmd %p has" 3142 " completed with data underrun " 3143 "reported\n", cmd); 3144 cmd->rq->resid_len = cmd->err_info->ResidualCnt; 3145 } 3146 break; 3147 case CMD_DATA_OVERRUN: 3148 if (cmd->rq->cmd_type == REQ_TYPE_FS) 3149 dev_warn(&h->pdev->dev, "cciss: cmd %p has" 3150 " completed with data overrun " 3151 "reported\n", cmd); 3152 break; 3153 case CMD_INVALID: 3154 dev_warn(&h->pdev->dev, "cciss: cmd %p is " 3155 "reported invalid\n", cmd); 3156 rq->errors = make_status_bytes(SAM_STAT_GOOD, 3157 cmd->err_info->CommandStatus, DRIVER_OK, 3158 (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? 3159 DID_PASSTHROUGH : DID_ERROR); 3160 break; 3161 case CMD_PROTOCOL_ERR: 3162 dev_warn(&h->pdev->dev, "cciss: cmd %p has " 3163 "protocol error\n", cmd); 3164 rq->errors = make_status_bytes(SAM_STAT_GOOD, 3165 cmd->err_info->CommandStatus, DRIVER_OK, 3166 (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? 3167 DID_PASSTHROUGH : DID_ERROR); 3168 break; 3169 case CMD_HARDWARE_ERR: 3170 dev_warn(&h->pdev->dev, "cciss: cmd %p had " 3171 "hardware error\n", cmd); 3172 rq->errors = make_status_bytes(SAM_STAT_GOOD, 3173 cmd->err_info->CommandStatus, DRIVER_OK, 3174 (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? 3175 DID_PASSTHROUGH : DID_ERROR); 3176 break; 3177 case CMD_CONNECTION_LOST: 3178 dev_warn(&h->pdev->dev, "cciss: cmd %p had " 3179 "connection lost\n", cmd); 3180 rq->errors = make_status_bytes(SAM_STAT_GOOD, 3181 cmd->err_info->CommandStatus, DRIVER_OK, 3182 (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? 3183 DID_PASSTHROUGH : DID_ERROR); 3184 break; 3185 case CMD_ABORTED: 3186 dev_warn(&h->pdev->dev, "cciss: cmd %p was " 3187 "aborted\n", cmd); 3188 rq->errors = make_status_bytes(SAM_STAT_GOOD, 3189 cmd->err_info->CommandStatus, DRIVER_OK, 3190 (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? 3191 DID_PASSTHROUGH : DID_ABORT); 3192 break; 3193 case CMD_ABORT_FAILED: 3194 dev_warn(&h->pdev->dev, "cciss: cmd %p reports " 3195 "abort failed\n", cmd); 3196 rq->errors = make_status_bytes(SAM_STAT_GOOD, 3197 cmd->err_info->CommandStatus, DRIVER_OK, 3198 (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
3199 DID_PASSTHROUGH : DID_ERROR); 3200 break; 3201 case CMD_UNSOLICITED_ABORT: 3202 dev_warn(&h->pdev->dev, "cciss%d: unsolicited " 3203 "abort %p\n", h->ctlr, cmd); 3204 if (cmd->retry_count < MAX_CMD_RETRIES) { 3205 retry_cmd = 1; 3206 dev_warn(&h->pdev->dev, "retrying %p\n", cmd); 3207 cmd->retry_count++; 3208 } else 3209 dev_warn(&h->pdev->dev, 3210 "%p retried too many times\n", cmd); 3211 rq->errors = make_status_bytes(SAM_STAT_GOOD, 3212 cmd->err_info->CommandStatus, DRIVER_OK, 3213 (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? 3214 DID_PASSTHROUGH : DID_ABORT); 3215 break; 3216 case CMD_TIMEOUT: 3217 dev_warn(&h->pdev->dev, "cmd %p timed out\n", cmd); 3218 rq->errors = make_status_bytes(SAM_STAT_GOOD, 3219 cmd->err_info->CommandStatus, DRIVER_OK, 3220 (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? 3221 DID_PASSTHROUGH : DID_ERROR); 3222 break; 3223 case CMD_UNABORTABLE: 3224 dev_warn(&h->pdev->dev, "cmd %p unabortable\n", cmd); 3225 rq->errors = make_status_bytes(SAM_STAT_GOOD, 3226 cmd->err_info->CommandStatus, DRIVER_OK, 3227 cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC ? 3228 DID_PASSTHROUGH : DID_ERROR); 3229 break; 3230 default: 3231 dev_warn(&h->pdev->dev, "cmd %p returned " 3232 "unknown status %x\n", cmd, 3233 cmd->err_info->CommandStatus); 3234 rq->errors = make_status_bytes(SAM_STAT_GOOD, 3235 cmd->err_info->CommandStatus, DRIVER_OK, 3236 (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? 3237 DID_PASSTHROUGH : DID_ERROR); 3238 } 3239 3240 after_error_processing: 3241 3242 /* We need to return this command */ 3243 if (retry_cmd) { 3244 resend_cciss_cmd(h, cmd); 3245 return; 3246 } 3247 cmd->rq->completion_data = cmd; 3248 blk_complete_request(cmd->rq); 3249 } 3250 3251 static inline u32 cciss_tag_contains_index(u32 tag) 3252 { 3253 #define DIRECT_LOOKUP_BIT 0x10 3254 return tag & DIRECT_LOOKUP_BIT; 3255 } 3256 3257 static inline u32 cciss_tag_to_index(u32 tag) 3258 { 3259 #define DIRECT_LOOKUP_SHIFT 5 3260 return tag >> DIRECT_LOOKUP_SHIFT; 3261 } 3262 3263 static inline u32 cciss_tag_discard_error_bits(ctlr_info_t *h, u32 tag) 3264 { 3265 #define CCISS_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1) 3266 #define CCISS_SIMPLE_ERROR_BITS 0x03 3267 if (likely(h->transMethod & CFGTBL_Trans_Performant)) 3268 return tag & ~CCISS_PERF_ERROR_BITS; 3269 return tag & ~CCISS_SIMPLE_ERROR_BITS; 3270 } 3271 3272 static inline void cciss_mark_tag_indexed(u32 *tag) 3273 { 3274 *tag |= DIRECT_LOOKUP_BIT; 3275 } 3276 3277 static inline void cciss_set_tag_index(u32 *tag, u32 index) 3278 { 3279 *tag |= (index << DIRECT_LOOKUP_SHIFT); 3280 } 3281 3282 /* 3283 * Get a request and submit it to the controller.
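 * Runs with q->queue_lock (which is h->lock, as set up in cciss_add_disk()) held; the lock is dropped while the scatter-gather list is built and mapped, and re-taken before the command is added to reqQ.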
3284 */ 3285static void do_cciss_request(struct request_queue *q) 3286{ 3287 ctlr_info_t *h = q->queuedata; 3288 CommandList_struct *c; 3289 sector_t start_blk; 3290 int seg; 3291 struct request *creq; 3292 u64bit temp64; 3293 struct scatterlist *tmp_sg; 3294 SGDescriptor_struct *curr_sg; 3295 drive_info_struct *drv; 3296 int i, dir; 3297 int sg_index = 0; 3298 int chained = 0; 3299 3300 queue: 3301 creq = blk_peek_request(q); 3302 if (!creq) 3303 goto startio; 3304 3305 BUG_ON(creq->nr_phys_segments > h->maxsgentries); 3306 3307 c = cmd_alloc(h); 3308 if (!c) 3309 goto full; 3310 3311 blk_start_request(creq); 3312 3313 tmp_sg = h->scatter_list[c->cmdindex]; 3314 spin_unlock_irq(q->queue_lock); 3315 3316 c->cmd_type = CMD_RWREQ; 3317 c->rq = creq; 3318 3319 /* fill in the request */ 3320 drv = creq->rq_disk->private_data; 3321 c->Header.ReplyQueue = 0; /* unused in simple mode */ 3322 /* got command from pool, so use the command block index instead */ 3323 /* for direct lookups. */ 3324 /* The first 2 bits are reserved for controller error reporting. */ 3325 cciss_set_tag_index(&c->Header.Tag.lower, c->cmdindex); 3326 cciss_mark_tag_indexed(&c->Header.Tag.lower); 3327 memcpy(&c->Header.LUN, drv->LunID, sizeof(drv->LunID)); 3328 c->Request.CDBLen = 10; /* 12 byte commands not in FW yet; */ 3329 c->Request.Type.Type = TYPE_CMD; /* It is a command. */ 3330 c->Request.Type.Attribute = ATTR_SIMPLE; 3331 c->Request.Type.Direction = 3332 (rq_data_dir(creq) == READ) ? XFER_READ : XFER_WRITE; 3333 c->Request.Timeout = 0; /* Don't time out */ 3334 c->Request.CDB[0] = 3335 (rq_data_dir(creq) == READ) ? h->cciss_read : h->cciss_write; 3336 start_blk = blk_rq_pos(creq); 3337 dev_dbg(&h->pdev->dev, "sector =%d nr_sectors=%d\n", 3338 (int)blk_rq_pos(creq), (int)blk_rq_sectors(creq)); 3339 sg_init_table(tmp_sg, h->maxsgentries); 3340 seg = blk_rq_map_sg(q, creq, tmp_sg); 3341 3342 /* get the DMA records for the setup */ 3343 if (c->Request.Type.Direction == XFER_READ) 3344 dir = PCI_DMA_FROMDEVICE; 3345 else 3346 dir = PCI_DMA_TODEVICE; 3347 3348 curr_sg = c->SG; 3349 sg_index = 0; 3350 chained = 0; 3351 3352 for (i = 0; i < seg; i++) { 3353 if (((sg_index+1) == (h->max_cmd_sgentries)) && 3354 !chained && ((seg - i) > 1)) { 3355 /* Point to next chain block. 
*/ 3356 curr_sg = h->cmd_sg_list[c->cmdindex]; 3357 sg_index = 0; 3358 chained = 1; 3359 } 3360 curr_sg[sg_index].Len = tmp_sg[i].length; 3361 temp64.val = (__u64) pci_map_page(h->pdev, sg_page(&tmp_sg[i]), 3362 tmp_sg[i].offset, 3363 tmp_sg[i].length, dir); 3364 curr_sg[sg_index].Addr.lower = temp64.val32.lower; 3365 curr_sg[sg_index].Addr.upper = temp64.val32.upper; 3366 curr_sg[sg_index].Ext = 0; /* we are not chaining */ 3367 ++sg_index; 3368 } 3369 if (chained) 3370 cciss_map_sg_chain_block(h, c, h->cmd_sg_list[c->cmdindex], 3371 (seg - (h->max_cmd_sgentries - 1)) * 3372 sizeof(SGDescriptor_struct)); 3373 3374 /* track how many SG entries we are using */ 3375 if (seg > h->maxSG) 3376 h->maxSG = seg; 3377 3378 dev_dbg(&h->pdev->dev, "Submitting %u sectors in %d segments " 3379 "chained[%d]\n", 3380 blk_rq_sectors(creq), seg, chained); 3381 3382 c->Header.SGTotal = seg + chained; 3383 if (seg <= h->max_cmd_sgentries) 3384 c->Header.SGList = c->Header.SGTotal; 3385 else 3386 c->Header.SGList = h->max_cmd_sgentries; 3387 set_performant_mode(h, c); 3388 3389 if (likely(creq->cmd_type == REQ_TYPE_FS)) { 3390 if (h->cciss_read == CCISS_READ_10) { 3391 c->Request.CDB[1] = 0; 3392 c->Request.CDB[2] = (start_blk >> 24) & 0xff; /* MSB */ 3393 c->Request.CDB[3] = (start_blk >> 16) & 0xff; 3394 c->Request.CDB[4] = (start_blk >> 8) & 0xff; 3395 c->Request.CDB[5] = start_blk & 0xff; 3396 c->Request.CDB[6] = 0; /* (sect >> 24) & 0xff; MSB */ 3397 c->Request.CDB[7] = (blk_rq_sectors(creq) >> 8) & 0xff; 3398 c->Request.CDB[8] = blk_rq_sectors(creq) & 0xff; 3399 c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0; 3400 } else { 3401 u32 upper32 = upper_32_bits(start_blk); 3402 3403 c->Request.CDBLen = 16; 3404 c->Request.CDB[1] = 0; 3405 c->Request.CDB[2] = (upper32 >> 24) & 0xff; /* MSB */ 3406 c->Request.CDB[3] = (upper32 >> 16) & 0xff; 3407 c->Request.CDB[4] = (upper32 >> 8) & 0xff; 3408 c->Request.CDB[5] = upper32 & 0xff; 3409 c->Request.CDB[6] = (start_blk >> 24) & 0xff; 3410 c->Request.CDB[7] = (start_blk >> 16) & 0xff; 3411 c->Request.CDB[8] = (start_blk >> 8) & 0xff; 3412 c->Request.CDB[9] = start_blk & 0xff; 3413 c->Request.CDB[10] = (blk_rq_sectors(creq) >> 24) & 0xff; 3414 c->Request.CDB[11] = (blk_rq_sectors(creq) >> 16) & 0xff; 3415 c->Request.CDB[12] = (blk_rq_sectors(creq) >> 8) & 0xff; 3416 c->Request.CDB[13] = blk_rq_sectors(creq) & 0xff; 3417 c->Request.CDB[14] = c->Request.CDB[15] = 0; 3418 } 3419 } else if (creq->cmd_type == REQ_TYPE_BLOCK_PC) { 3420 c->Request.CDBLen = creq->cmd_len; 3421 memcpy(c->Request.CDB, creq->cmd, BLK_MAX_CDB); 3422 } else { 3423 dev_warn(&h->pdev->dev, "bad request type %d\n", 3424 creq->cmd_type); 3425 BUG(); 3426 } 3427 3428 spin_lock_irq(q->queue_lock); 3429 3430 addQ(&h->reqQ, c); 3431 h->Qdepth++; 3432 if (h->Qdepth > h->maxQsinceinit) 3433 h->maxQsinceinit = h->Qdepth; 3434 3435 goto queue; 3436 full: 3437 blk_stop_queue(q); 3438 startio: 3439 /* We will already have the driver lock here so no need 3440 * to lock it.
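 * (The block layer invokes the request function with queue_lock held, and cciss_add_disk() passed h->lock to blk_init_queue() as that lock.)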
3441 */ 3442 start_io(h); 3443} 3444 3445static inline unsigned long get_next_completion(ctlr_info_t *h) 3446{ 3447 return h->access.command_completed(h); 3448} 3449 3450static inline int interrupt_pending(ctlr_info_t *h) 3451{ 3452 return h->access.intr_pending(h); 3453} 3454 3455static inline long interrupt_not_for_us(ctlr_info_t *h) 3456{ 3457 return ((h->access.intr_pending(h) == 0) || 3458 (h->interrupts_enabled == 0)); 3459} 3460 3461static inline int bad_tag(ctlr_info_t *h, u32 tag_index, 3462 u32 raw_tag) 3463{ 3464 if (unlikely(tag_index >= h->nr_cmds)) { 3465 dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag); 3466 return 1; 3467 } 3468 return 0; 3469} 3470 3471static inline void finish_cmd(ctlr_info_t *h, CommandList_struct *c, 3472 u32 raw_tag) 3473{ 3474 removeQ(c); 3475 if (likely(c->cmd_type == CMD_RWREQ)) 3476 complete_command(h, c, 0); 3477 else if (c->cmd_type == CMD_IOCTL_PEND) 3478 complete(c->waiting); 3479#ifdef CONFIG_CISS_SCSI_TAPE 3480 else if (c->cmd_type == CMD_SCSI) 3481 complete_scsi_command(c, 0, raw_tag); 3482#endif 3483} 3484 3485static inline u32 next_command(ctlr_info_t *h) 3486{ 3487 u32 a; 3488 3489 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant))) 3490 return h->access.command_completed(h); 3491 3492 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) { 3493 a = *(h->reply_pool_head); /* Next cmd in ring buffer */ 3494 (h->reply_pool_head)++; 3495 h->commands_outstanding--; 3496 } else { 3497 a = FIFO_EMPTY; 3498 } 3499 /* Check for wraparound */ 3500 if (h->reply_pool_head == (h->reply_pool + h->max_commands)) { 3501 h->reply_pool_head = h->reply_pool; 3502 h->reply_pool_wraparound ^= 1; 3503 } 3504 return a; 3505} 3506 3507/* process completion of an indexed ("direct lookup") command */ 3508static inline u32 process_indexed_cmd(ctlr_info_t *h, u32 raw_tag) 3509{ 3510 u32 tag_index; 3511 CommandList_struct *c; 3512 3513 tag_index = cciss_tag_to_index(raw_tag); 3514 if (bad_tag(h, tag_index, raw_tag)) 3515 return next_command(h); 3516 c = h->cmd_pool + tag_index; 3517 finish_cmd(h, c, raw_tag); 3518 return next_command(h); 3519} 3520 3521/* process completion of a non-indexed command */ 3522static inline u32 process_nonindexed_cmd(ctlr_info_t *h, u32 raw_tag) 3523{ 3524 CommandList_struct *c = NULL; 3525 __u32 busaddr_masked, tag_masked; 3526 3527 tag_masked = cciss_tag_discard_error_bits(h, raw_tag); 3528 list_for_each_entry(c, &h->cmpQ, list) { 3529 busaddr_masked = cciss_tag_discard_error_bits(h, c->busaddr); 3530 if (busaddr_masked == tag_masked) { 3531 finish_cmd(h, c, raw_tag); 3532 return next_command(h); 3533 } 3534 } 3535 bad_tag(h, h->nr_cmds + 1, raw_tag); 3536 return next_command(h); 3537} 3538 3539/* Some controllers, like p400, will give us one interrupt 3540 * after a soft reset, even if we turned interrupts off. 3541 * Only need to check for this in the cciss_xxx_discard_completions 3542 * functions. 3543 */ 3544static int ignore_bogus_interrupt(ctlr_info_t *h) 3545{ 3546 if (likely(!reset_devices)) 3547 return 0; 3548 3549 if (likely(h->interrupts_enabled)) 3550 return 0; 3551 3552 dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled " 3553 "(known firmware bug.) 
Ignoring.\n"); 3554 3555 return 1; 3556} 3557 3558static irqreturn_t cciss_intx_discard_completions(int irq, void *dev_id) 3559{ 3560 ctlr_info_t *h = dev_id; 3561 unsigned long flags; 3562 u32 raw_tag; 3563 3564 if (ignore_bogus_interrupt(h)) 3565 return IRQ_NONE; 3566 3567 if (interrupt_not_for_us(h)) 3568 return IRQ_NONE; 3569 spin_lock_irqsave(&h->lock, flags); 3570 while (interrupt_pending(h)) { 3571 raw_tag = get_next_completion(h); 3572 while (raw_tag != FIFO_EMPTY) 3573 raw_tag = next_command(h); 3574 } 3575 spin_unlock_irqrestore(&h->lock, flags); 3576 return IRQ_HANDLED; 3577} 3578 3579static irqreturn_t cciss_msix_discard_completions(int irq, void *dev_id) 3580{ 3581 ctlr_info_t *h = dev_id; 3582 unsigned long flags; 3583 u32 raw_tag; 3584 3585 if (ignore_bogus_interrupt(h)) 3586 return IRQ_NONE; 3587 3588 spin_lock_irqsave(&h->lock, flags); 3589 raw_tag = get_next_completion(h); 3590 while (raw_tag != FIFO_EMPTY) 3591 raw_tag = next_command(h); 3592 spin_unlock_irqrestore(&h->lock, flags); 3593 return IRQ_HANDLED; 3594} 3595 3596static irqreturn_t do_cciss_intx(int irq, void *dev_id) 3597{ 3598 ctlr_info_t *h = dev_id; 3599 unsigned long flags; 3600 u32 raw_tag; 3601 3602 if (interrupt_not_for_us(h)) 3603 return IRQ_NONE; 3604 spin_lock_irqsave(&h->lock, flags); 3605 while (interrupt_pending(h)) { 3606 raw_tag = get_next_completion(h); 3607 while (raw_tag != FIFO_EMPTY) { 3608 if (cciss_tag_contains_index(raw_tag)) 3609 raw_tag = process_indexed_cmd(h, raw_tag); 3610 else 3611 raw_tag = process_nonindexed_cmd(h, raw_tag); 3612 } 3613 } 3614 spin_unlock_irqrestore(&h->lock, flags); 3615 return IRQ_HANDLED; 3616} 3617 3618/* Add a second interrupt handler for MSI/MSI-X mode. In this mode we never 3619 * check the interrupt pending register because it is not set. 3620 */ 3621static irqreturn_t do_cciss_msix_intr(int irq, void *dev_id) 3622{ 3623 ctlr_info_t *h = dev_id; 3624 unsigned long flags; 3625 u32 raw_tag; 3626 3627 spin_lock_irqsave(&h->lock, flags); 3628 raw_tag = get_next_completion(h); 3629 while (raw_tag != FIFO_EMPTY) { 3630 if (cciss_tag_contains_index(raw_tag)) 3631 raw_tag = process_indexed_cmd(h, raw_tag); 3632 else 3633 raw_tag = process_nonindexed_cmd(h, raw_tag); 3634 } 3635 spin_unlock_irqrestore(&h->lock, flags); 3636 return IRQ_HANDLED; 3637} 3638 3639/** 3640 * add_to_scan_list() - add controller to rescan queue 3641 * @h: Pointer to the controller. 3642 * 3643 * Adds the controller to the rescan queue if not already on the queue. 3644 * 3645 * returns 1 if added to the queue, 0 if skipped (could be on the 3646 * queue already, or the controller could be initializing or shutting 3647 * down). 3648 **/ 3649static int add_to_scan_list(struct ctlr_info *h) 3650{ 3651 struct ctlr_info *test_h; 3652 int found = 0; 3653 int ret = 0; 3654 3655 if (h->busy_initializing) 3656 return 0; 3657 3658 if (!mutex_trylock(&h->busy_shutting_down)) 3659 return 0; 3660 3661 mutex_lock(&scan_mutex); 3662 list_for_each_entry(test_h, &scan_q, scan_list) { 3663 if (test_h == h) { 3664 found = 1; 3665 break; 3666 } 3667 } 3668 if (!found && !h->busy_scanning) { 3669 reinit_completion(&h->scan_wait); 3670 list_add_tail(&h->scan_list, &scan_q); 3671 ret = 1; 3672 } 3673 mutex_unlock(&scan_mutex); 3674 mutex_unlock(&h->busy_shutting_down); 3675 3676 return ret; 3677} 3678 3679/** 3680 * remove_from_scan_list() - remove controller from rescan queue 3681 * @h: Pointer to the controller. 3682 * 3683 * Removes the controller from the rescan queue if present. 
Blocks if
 * the controller is currently conducting a rescan. The controller
 * can be in one of three states:
 * 1. Doesn't need a scan
 * 2. On the scan list, but not scanning yet (we remove it)
 * 3. Busy scanning (and not on the list). In this case we want to wait for
 *    the scan to complete to make sure the scanning thread for this
 *    controller is completely idle.
 **/
static void remove_from_scan_list(struct ctlr_info *h)
{
	struct ctlr_info *test_h, *tmp_h;

	mutex_lock(&scan_mutex);
	list_for_each_entry_safe(test_h, tmp_h, &scan_q, scan_list) {
		if (test_h == h) { /* state 2. */
			list_del(&h->scan_list);
			complete_all(&h->scan_wait);
			mutex_unlock(&scan_mutex);
			return;
		}
	}
	if (h->busy_scanning) { /* state 3. */
		mutex_unlock(&scan_mutex);
		wait_for_completion(&h->scan_wait);
	} else { /* state 1, nothing to do. */
		mutex_unlock(&scan_mutex);
	}
}

/**
 * scan_thread() - kernel thread used to rescan controllers
 * @data: Ignored.
 *
 * A kernel thread used to scan for drive topology changes on
 * controllers. The thread processes only one controller at a time
 * using a queue. Controllers are added to the queue using
 * add_to_scan_list() and removed from the queue either after being
 * processed or via remove_from_scan_list().
 *
 * returns 0.
 **/
static int scan_thread(void *data)
{
	struct ctlr_info *h;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
		if (kthread_should_stop())
			break;

		while (1) {
			mutex_lock(&scan_mutex);
			if (list_empty(&scan_q)) {
				mutex_unlock(&scan_mutex);
				break;
			}

			h = list_entry(scan_q.next, struct ctlr_info,
					scan_list);
			list_del(&h->scan_list);
			h->busy_scanning = 1;
			mutex_unlock(&scan_mutex);

			rebuild_lun_table(h, 0, 0);
			complete_all(&h->scan_wait);
			mutex_lock(&scan_mutex);
			h->busy_scanning = 0;
			mutex_unlock(&scan_mutex);
		}
	}

	return 0;
}

static int check_for_unit_attention(ctlr_info_t *h, CommandList_struct *c)
{
	if (c->err_info->SenseInfo[2] != UNIT_ATTENTION)
		return 0;

	switch (c->err_info->SenseInfo[12]) {
	case STATE_CHANGED:
		dev_warn(&h->pdev->dev, "a state change "
			"detected, command retried\n");
		return 1;
	case LUN_FAILED:
		dev_warn(&h->pdev->dev, "LUN failure "
			"detected, action required\n");
		return 1;
	case REPORT_LUNS_CHANGED:
		dev_warn(&h->pdev->dev, "report LUN data changed\n");
	/*
	 * Here, we could call add_to_scan_list and wake up the scan thread,
	 * except that it's quite likely that we will get more than one
	 * REPORT_LUNS_CHANGED condition in quick succession, which means
	 * that those which occur after the first one will likely happen
	 * *during* the scan_thread's rescan. And the rescan code is not
	 * robust enough to restart in the middle, undoing what it has already
	 * done, and it's not clear that it's even possible to do this, since
	 * part of what it does is notify the block layer, which starts
	 * doing its own I/O to read partition tables and so on, and the
	 * driver doesn't have visibility to know what might need undoing.
	 * In any event, even if it were possible, it would be horribly
	 * complicated to get right, so we just don't do it for now.
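	 *
	 * For reference, the deliberately unused hookup would be roughly
	 * the following sketch (reusing the scan-queue helpers above):
	 *
	 *	if (add_to_scan_list(h))
	 *		wake_up_process(cciss_scan_thread);
	 *
	 * i.e. queue the controller and kick scan_thread() out of its
	 * interruptible sleep.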
	 *
	 * Note: this REPORT_LUNS_CHANGED condition only occurs on the MSA2012.
	 */
		return 1;
	case POWER_OR_RESET:
		dev_warn(&h->pdev->dev,
			"a power on or device reset detected\n");
		return 1;
	case UNIT_ATTENTION_CLEARED:
		dev_warn(&h->pdev->dev,
			"unit attention cleared by another initiator\n");
		return 1;
	default:
		dev_warn(&h->pdev->dev, "unknown unit attention detected\n");
		return 1;
	}
}

/*
 * We cannot read the structure directly; for portability we must use
 * the I/O functions. This is for debug only.
 */
static void print_cfg_table(ctlr_info_t *h)
{
	int i;
	char temp_name[17];
	CfgTable_struct *tb = h->cfgtable;

	dev_dbg(&h->pdev->dev, "Controller Configuration information\n");
	dev_dbg(&h->pdev->dev, "------------------------------------\n");
	for (i = 0; i < 4; i++)
		temp_name[i] = readb(&(tb->Signature[i]));
	temp_name[4] = '\0';
	dev_dbg(&h->pdev->dev, " Signature = %s\n", temp_name);
	dev_dbg(&h->pdev->dev, " Spec Number = %d\n",
		readl(&(tb->SpecValence)));
	dev_dbg(&h->pdev->dev, " Transport methods supported = 0x%x\n",
		readl(&(tb->TransportSupport)));
	dev_dbg(&h->pdev->dev, " Transport methods active = 0x%x\n",
		readl(&(tb->TransportActive)));
	dev_dbg(&h->pdev->dev, " Requested transport Method = 0x%x\n",
		readl(&(tb->HostWrite.TransportRequest)));
	dev_dbg(&h->pdev->dev, " Coalesce Interrupt Delay = 0x%x\n",
		readl(&(tb->HostWrite.CoalIntDelay)));
	dev_dbg(&h->pdev->dev, " Coalesce Interrupt Count = 0x%x\n",
		readl(&(tb->HostWrite.CoalIntCount)));
	dev_dbg(&h->pdev->dev, " Max outstanding commands = %d\n",
		readl(&(tb->CmdsOutMax)));
	dev_dbg(&h->pdev->dev, " Bus Types = 0x%x\n",
		readl(&(tb->BusTypes)));
	for (i = 0; i < 16; i++)
		temp_name[i] = readb(&(tb->ServerName[i]));
	temp_name[16] = '\0';
	dev_dbg(&h->pdev->dev, " Server Name = %s\n", temp_name);
	dev_dbg(&h->pdev->dev, " Heartbeat Counter = 0x%x\n\n\n",
		readl(&(tb->HeartBeat)));
}

static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
{
	int i, offset, mem_type, bar_type;

	if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
		return 0;
	offset = 0;
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
		if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
			offset += 4;
		else {
			mem_type = pci_resource_flags(pdev, i) &
				PCI_BASE_ADDRESS_MEM_TYPE_MASK;
			switch (mem_type) {
			case PCI_BASE_ADDRESS_MEM_TYPE_32:
			case PCI_BASE_ADDRESS_MEM_TYPE_1M:
				offset += 4; /* 32 bit */
				break;
			case PCI_BASE_ADDRESS_MEM_TYPE_64:
				offset += 8;
				break;
			default: /* reserved in PCI 2.2 */
				dev_warn(&pdev->dev,
					"Base address is invalid\n");
				return -1;
			}
		}
		if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
			return i + 1;
	}
	return -1;
}

/* Fill in bucket_map[], given nsgs (the max number of
 * scatter gather elements supported) and bucket[],
 * which is an array of 8 integers. The bucket[] array
 * contains 8 different DMA transfer sizes (in 16
 * byte increments) which the controller uses to fetch
 * commands.
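 * (A worked example, assuming the bft[] values used by
 * cciss_enter_performant_mode() below, {5, 6, 8, 10, 12, 20, 28,
 * MAXSGENTRIES + 4}: a command with 3 SG entries needs
 * 3 + MINIMUM_TRANSFER_BLOCKS = 7 blocks, so it maps to bucket 2,
 * the first bucket >= 7, and is fetched with an 8-block, i.e.
 * 128-byte, DMA transfer.)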
This function fills in bucket_map[], which
 * maps a given number of scatter gather elements to one of
 * the 8 DMA transfer sizes. The point of it is to allow the
 * controller to only do as much DMA as needed to fetch the
 * command, with the DMA transfer size encoded in the lower
 * bits of the command address.
 */
static void calc_bucket_map(int bucket[], int num_buckets,
	int nsgs, int *bucket_map)
{
	int i, j, b, size;

	/* even a command with 0 SGs requires 4 blocks */
#define MINIMUM_TRANSFER_BLOCKS 4
#define NUM_BUCKETS 8
	/* Note, bucket_map must have nsgs+1 entries. */
	for (i = 0; i <= nsgs; i++) {
		/* Compute size of a command with i SG entries */
		size = i + MINIMUM_TRANSFER_BLOCKS;
		b = num_buckets; /* Assume the biggest bucket */
		/* Find the bucket that is just big enough */
		for (j = 0; j < num_buckets; j++) {
			if (bucket[j] >= size) {
				b = j;
				break;
			}
		}
		/* for a command with i SG entries, use bucket b. */
		bucket_map[i] = b;
	}
}

static void cciss_wait_for_mode_change_ack(ctlr_info_t *h)
{
	int i;

	/* Under certain very rare conditions, this can take a while.
	 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
	 * as we enter this code.) */
	for (i = 0; i < MAX_CONFIG_WAIT; i++) {
		if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
			break;
		usleep_range(10000, 20000);
	}
}

static void cciss_enter_performant_mode(ctlr_info_t *h, u32 use_short_tags)
{
	/* This is a bit complicated. There are 8 registers on
	 * the controller which we write to to tell it 8 different
	 * sizes of commands which there may be. It's a way of
	 * reducing the DMA done to fetch each command. Encoded into
	 * each command's tag are 3 bits which communicate to the controller
	 * which of the eight sizes that command fits within. The size of
	 * each command depends on how many scatter gather entries there are.
	 * Each SG entry requires 16 bytes. The eight registers are programmed
	 * with the number of 16-byte blocks a command of that size requires.
	 * The smallest command possible requires 5 such 16-byte blocks.
	 * The largest command possible requires MAXSGENTRIES + 4 16-byte
	 * blocks. Note, this only extends to the SG entries contained
	 * within the command block, and does not extend to chained blocks
	 * of SG elements. bft[] contains the eight values we write to
	 * the registers. They are not evenly distributed, but have more
	 * sizes for small commands, and fewer sizes for larger commands.
	 */
	__u32 trans_offset;
	int bft[8] = { 5, 6, 8, 10, 12, 20, 28, MAXSGENTRIES + 4};
	/*
	 *  5 = 1 s/g entry or 4k
	 *  6 = 2 s/g entry or 8k
	 *  8 = 4 s/g entry or 16k
	 * 10 = 6 s/g entry or 24k
	 */
	unsigned long register_value;
	BUILD_BUG_ON(28 > MAXSGENTRIES + 4);

	h->reply_pool_wraparound = 1; /* spec: init to 1 */

	/* Controller spec: zero out this buffer.
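	 * Zeroed entries have bit 0 clear while reply_pool_wraparound
	 * starts out as 1, so next_command() above initially treats
	 * every slot as not-yet-written until the controller posts a
	 * real tag (whose low bit alternates on each pass around the
	 * ring).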
	 */
	memset(h->reply_pool, 0, h->max_commands * sizeof(__u64));
	h->reply_pool_head = h->reply_pool;

	trans_offset = readl(&(h->cfgtable->TransMethodOffset));
	calc_bucket_map(bft, ARRAY_SIZE(bft), h->maxsgentries,
		h->blockFetchTable);
	writel(bft[0], &h->transtable->BlockFetch0);
	writel(bft[1], &h->transtable->BlockFetch1);
	writel(bft[2], &h->transtable->BlockFetch2);
	writel(bft[3], &h->transtable->BlockFetch3);
	writel(bft[4], &h->transtable->BlockFetch4);
	writel(bft[5], &h->transtable->BlockFetch5);
	writel(bft[6], &h->transtable->BlockFetch6);
	writel(bft[7], &h->transtable->BlockFetch7);

	/* size of controller ring buffer */
	writel(h->max_commands, &h->transtable->RepQSize);
	writel(1, &h->transtable->RepQCount);
	writel(0, &h->transtable->RepQCtrAddrLow32);
	writel(0, &h->transtable->RepQCtrAddrHigh32);
	writel(h->reply_pool_dhandle, &h->transtable->RepQAddr0Low32);
	writel(0, &h->transtable->RepQAddr0High32);
	writel(CFGTBL_Trans_Performant | use_short_tags,
		&(h->cfgtable->HostWrite.TransportRequest));

	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	cciss_wait_for_mode_change_ack(h);
	register_value = readl(&(h->cfgtable->TransportActive));
	if (!(register_value & CFGTBL_Trans_Performant))
		dev_warn(&h->pdev->dev, "cciss: unable to get board into"
			" performant mode\n");
}

static void cciss_put_controller_into_performant_mode(ctlr_info_t *h)
{
	__u32 trans_support;

	if (cciss_simple_mode)
		return;

	dev_dbg(&h->pdev->dev, "Trying to put board into Performant mode\n");
	/* Attempt to put controller into performant mode if supported */
	/* Does board support performant mode? */
	trans_support = readl(&(h->cfgtable->TransportSupport));
	if (!(trans_support & PERFORMANT_MODE))
		return;

	dev_dbg(&h->pdev->dev, "Placing controller into performant mode\n");
	/* Performant mode demands commands on a 32-byte boundary.
	 * pci_alloc_consistent aligns on page boundaries already,
	 * so we just need to check whether the size is divisible by 32.
	 */
	if ((sizeof(CommandList_struct) % 32) != 0) {
		dev_warn(&h->pdev->dev,
			"cciss info: command size[%d] not divisible by 32, "
			"no performant mode\n",
			(int)sizeof(CommandList_struct));
		return;
	}

	/* Performant mode ring buffer and supporting data structures */
	h->reply_pool = (__u64 *)pci_alloc_consistent(
		h->pdev, h->max_commands * sizeof(__u64),
		&(h->reply_pool_dhandle));

	/* Need a block fetch table for performant mode */
	h->blockFetchTable = kmalloc(((h->maxsgentries+1) *
		sizeof(__u32)), GFP_KERNEL);

	if ((h->reply_pool == NULL) || (h->blockFetchTable == NULL))
		goto clean_up;

	cciss_enter_performant_mode(h,
		trans_support & CFGTBL_Trans_use_short_tags);

	/* Change the access methods to the performant access methods */
	h->access = SA5_performant_access;
	h->transMethod = CFGTBL_Trans_Performant;

	return;
clean_up:
	kfree(h->blockFetchTable);
	if (h->reply_pool)
		pci_free_consistent(h->pdev,
				h->max_commands * sizeof(__u64),
				h->reply_pool,
				h->reply_pool_dhandle);
} /* cciss_put_controller_into_performant_mode */

/* If MSI/MSI-X is supported by the kernel we will try to enable it on
 * controllers that are capable. If not, we use IO-APIC mode.
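 * The probe order below is: MSI-X (four vectors) first, then
 * single-vector MSI, then fall back to the pin-based IRQ that the
 * system assigned to h->pdev->irq.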
4064 */ 4065 4066static void cciss_interrupt_mode(ctlr_info_t *h) 4067{ 4068#ifdef CONFIG_PCI_MSI 4069 int err; 4070 struct msix_entry cciss_msix_entries[4] = { {0, 0}, {0, 1}, 4071 {0, 2}, {0, 3} 4072 }; 4073 4074 /* Some boards advertise MSI but don't really support it */ 4075 if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) || 4076 (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11)) 4077 goto default_int_mode; 4078 4079 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) { 4080 err = pci_enable_msix_exact(h->pdev, cciss_msix_entries, 4); 4081 if (!err) { 4082 h->intr[0] = cciss_msix_entries[0].vector; 4083 h->intr[1] = cciss_msix_entries[1].vector; 4084 h->intr[2] = cciss_msix_entries[2].vector; 4085 h->intr[3] = cciss_msix_entries[3].vector; 4086 h->msix_vector = 1; 4087 return; 4088 } else { 4089 dev_warn(&h->pdev->dev, 4090 "MSI-X init failed %d\n", err); 4091 } 4092 } 4093 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) { 4094 if (!pci_enable_msi(h->pdev)) 4095 h->msi_vector = 1; 4096 else 4097 dev_warn(&h->pdev->dev, "MSI init failed\n"); 4098 } 4099default_int_mode: 4100#endif /* CONFIG_PCI_MSI */ 4101 /* if we get here we're going to use the default interrupt mode */ 4102 h->intr[h->intr_mode] = h->pdev->irq; 4103 return; 4104} 4105 4106static int cciss_lookup_board_id(struct pci_dev *pdev, u32 *board_id) 4107{ 4108 int i; 4109 u32 subsystem_vendor_id, subsystem_device_id; 4110 4111 subsystem_vendor_id = pdev->subsystem_vendor; 4112 subsystem_device_id = pdev->subsystem_device; 4113 *board_id = ((subsystem_device_id << 16) & 0xffff0000) | 4114 subsystem_vendor_id; 4115 4116 for (i = 0; i < ARRAY_SIZE(products); i++) { 4117 /* Stand aside for hpsa driver on request */ 4118 if (cciss_allow_hpsa) 4119 return -ENODEV; 4120 if (*board_id == products[i].board_id) 4121 return i; 4122 } 4123 dev_warn(&pdev->dev, "unrecognized board ID: 0x%08x, ignoring.\n", 4124 *board_id); 4125 return -ENODEV; 4126} 4127 4128static inline bool cciss_board_disabled(ctlr_info_t *h) 4129{ 4130 u16 command; 4131 4132 (void) pci_read_config_word(h->pdev, PCI_COMMAND, &command); 4133 return ((command & PCI_COMMAND_MEMORY) == 0); 4134} 4135 4136static int cciss_pci_find_memory_BAR(struct pci_dev *pdev, 4137 unsigned long *memory_bar) 4138{ 4139 int i; 4140 4141 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) 4142 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) { 4143 /* addressing mode bits already removed */ 4144 *memory_bar = pci_resource_start(pdev, i); 4145 dev_dbg(&pdev->dev, "memory BAR = %lx\n", 4146 *memory_bar); 4147 return 0; 4148 } 4149 dev_warn(&pdev->dev, "no memory BAR found\n"); 4150 return -ENODEV; 4151} 4152 4153static int cciss_wait_for_board_state(struct pci_dev *pdev, 4154 void __iomem *vaddr, int wait_for_ready) 4155#define BOARD_READY 1 4156#define BOARD_NOT_READY 0 4157{ 4158 int i, iterations; 4159 u32 scratchpad; 4160 4161 if (wait_for_ready) 4162 iterations = CCISS_BOARD_READY_ITERATIONS; 4163 else 4164 iterations = CCISS_BOARD_NOT_READY_ITERATIONS; 4165 4166 for (i = 0; i < iterations; i++) { 4167 scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET); 4168 if (wait_for_ready) { 4169 if (scratchpad == CCISS_FIRMWARE_READY) 4170 return 0; 4171 } else { 4172 if (scratchpad != CCISS_FIRMWARE_READY) 4173 return 0; 4174 } 4175 msleep(CCISS_BOARD_READY_POLL_INTERVAL_MSECS); 4176 } 4177 dev_warn(&pdev->dev, "board not ready, timed out.\n"); 4178 return -ENODEV; 4179} 4180 4181static int cciss_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr, 4182 u32 *cfg_base_addr, 
		u64 *cfg_base_addr_index, u64 *cfg_offset)
{
	*cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
	*cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
	*cfg_base_addr &= (u32) 0x0000ffff;
	*cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
	if (*cfg_base_addr_index == -1) {
		dev_warn(&pdev->dev, "cannot find cfg_base_addr_index, "
			"*cfg_base_addr = 0x%08x\n", *cfg_base_addr);
		return -ENODEV;
	}
	return 0;
}

static int cciss_find_cfgtables(ctlr_info_t *h)
{
	u64 cfg_offset;
	u32 cfg_base_addr;
	u64 cfg_base_addr_index;
	u32 trans_offset;
	int rc;

	rc = cciss_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
		&cfg_base_addr_index, &cfg_offset);
	if (rc)
		return rc;
	h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
		cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
	if (!h->cfgtable)
		return -ENOMEM;
	rc = write_driver_ver_to_cfgtable(h->cfgtable);
	if (rc)
		return rc;
	/* Find performant mode table. */
	trans_offset = readl(&h->cfgtable->TransMethodOffset);
	h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
		cfg_base_addr_index) + cfg_offset + trans_offset,
		sizeof(*h->transtable));
	if (!h->transtable)
		return -ENOMEM;
	return 0;
}

static void cciss_get_max_perf_mode_cmds(struct ctlr_info *h)
{
	h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));

	/* Limit commands in memory limited kdump scenario. */
	if (reset_devices && h->max_commands > 32)
		h->max_commands = 32;

	if (h->max_commands < 16) {
		dev_warn(&h->pdev->dev, "Controller reports "
			"max supported commands of %d, an obvious lie. "
			"Using 16. Ensure that firmware is up to date.\n",
			h->max_commands);
		h->max_commands = 16;
	}
}

/* Interrogate the hardware for some limits:
 * max commands, max SG elements without chaining, and with chaining,
 * SG chain block size, etc.
 */
static void cciss_find_board_params(ctlr_info_t *h)
{
	cciss_get_max_perf_mode_cmds(h);
	h->nr_cmds = h->max_commands - 4 - cciss_tape_cmds;
	h->maxsgentries = readl(&(h->cfgtable->MaxSGElements));
	/*
	 * The P600 may exhibit poor performance under some workloads
	 * if we use the value in the configuration table. Limit this
	 * controller to MAXSGENTRIES (32) instead.
	 */
	if (h->board_id == 0x3225103C)
		h->maxsgentries = MAXSGENTRIES;
	/*
	 * Limit in-command s/g elements to 32 to save DMA-able memory.
	 * However, the spec says if 0, use 31.
	 */
	h->max_cmd_sgentries = 31;
	if (h->maxsgentries > 512) {
		h->max_cmd_sgentries = 32;
		h->chainsize = h->maxsgentries - h->max_cmd_sgentries + 1;
		h->maxsgentries--; /* save one for chain pointer */
	} else {
		h->maxsgentries = 31; /* default to traditional values */
		h->chainsize = 0;
	}
}

static inline bool CISS_signature_present(ctlr_info_t *h)
{
	if (!check_signature(h->cfgtable->Signature, "CISS", 4)) {
		dev_warn(&h->pdev->dev, "not a valid CISS config table\n");
		return false;
	}
	return true;
}

/* Need to enable prefetch in the SCSI core for 6400 in x86 */
static inline void cciss_enable_scsi_prefetch(ctlr_info_t *h)
{
#ifdef CONFIG_X86
	u32 prefetch;

	prefetch = readl(&(h->cfgtable->SCSI_Prefetch));
	prefetch |= 0x100;
	writel(prefetch, &(h->cfgtable->SCSI_Prefetch));
#endif
}

/* Disable DMA prefetch for the P600. Otherwise an ASIC bug may result
 * in a prefetch beyond physical memory.
 */
static inline void cciss_p600_dma_prefetch_quirk(ctlr_info_t *h)
{
	u32 dma_prefetch;
	__u32 dma_refetch;

	if (h->board_id != 0x3225103C)
		return;
	dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
	dma_prefetch |= 0x8000;
	writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
	pci_read_config_dword(h->pdev, PCI_COMMAND_PARITY, &dma_refetch);
	dma_refetch |= 0x1;
	pci_write_config_dword(h->pdev, PCI_COMMAND_PARITY, dma_refetch);
}

static int cciss_pci_init(ctlr_info_t *h)
{
	int prod_index, err;

	prod_index = cciss_lookup_board_id(h->pdev, &h->board_id);
	if (prod_index < 0)
		return -ENODEV;
	h->product_name = products[prod_index].product_name;
	h->access = *(products[prod_index].access);

	if (cciss_board_disabled(h)) {
		dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
		return -ENODEV;
	}

	pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
				PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);

	err = pci_enable_device(h->pdev);
	if (err) {
		dev_warn(&h->pdev->dev, "Unable to Enable PCI device\n");
		return err;
	}

	err = pci_request_regions(h->pdev, "cciss");
	if (err) {
		dev_warn(&h->pdev->dev,
			"Cannot obtain PCI resources, aborting\n");
		return err;
	}

	dev_dbg(&h->pdev->dev, "irq = %x\n", h->pdev->irq);
	dev_dbg(&h->pdev->dev, "board_id = %x\n", h->board_id);

/* If the kernel supports MSI/MSI-X we will try to enable that functionality,
 * else we use the IO-APIC interrupt assigned to us by system ROM.
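 * Whichever mode is selected, the vector that must later be passed to
 * request_irq()/free_irq() ends up in h->intr[h->intr_mode], where
 * intr_mode is chosen in cciss_init_one() based on the
 * cciss_simple_mode module parameter.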
4349 */ 4350 cciss_interrupt_mode(h); 4351 err = cciss_pci_find_memory_BAR(h->pdev, &h->paddr); 4352 if (err) 4353 goto err_out_free_res; 4354 h->vaddr = remap_pci_mem(h->paddr, 0x250); 4355 if (!h->vaddr) { 4356 err = -ENOMEM; 4357 goto err_out_free_res; 4358 } 4359 err = cciss_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY); 4360 if (err) 4361 goto err_out_free_res; 4362 err = cciss_find_cfgtables(h); 4363 if (err) 4364 goto err_out_free_res; 4365 print_cfg_table(h); 4366 cciss_find_board_params(h); 4367 4368 if (!CISS_signature_present(h)) { 4369 err = -ENODEV; 4370 goto err_out_free_res; 4371 } 4372 cciss_enable_scsi_prefetch(h); 4373 cciss_p600_dma_prefetch_quirk(h); 4374 err = cciss_enter_simple_mode(h); 4375 if (err) 4376 goto err_out_free_res; 4377 cciss_put_controller_into_performant_mode(h); 4378 return 0; 4379 4380err_out_free_res: 4381 /* 4382 * Deliberately omit pci_disable_device(): it does something nasty to 4383 * Smart Array controllers that pci_enable_device does not undo 4384 */ 4385 if (h->transtable) 4386 iounmap(h->transtable); 4387 if (h->cfgtable) 4388 iounmap(h->cfgtable); 4389 if (h->vaddr) 4390 iounmap(h->vaddr); 4391 pci_release_regions(h->pdev); 4392 return err; 4393} 4394 4395/* Function to find the first free pointer into our hba[] array 4396 * Returns -1 if no free entries are left. 4397 */ 4398static int alloc_cciss_hba(struct pci_dev *pdev) 4399{ 4400 int i; 4401 4402 for (i = 0; i < MAX_CTLR; i++) { 4403 if (!hba[i]) { 4404 ctlr_info_t *h; 4405 4406 h = kzalloc(sizeof(ctlr_info_t), GFP_KERNEL); 4407 if (!h) 4408 goto Enomem; 4409 hba[i] = h; 4410 return i; 4411 } 4412 } 4413 dev_warn(&pdev->dev, "This driver supports a maximum" 4414 " of %d controllers.\n", MAX_CTLR); 4415 return -1; 4416Enomem: 4417 dev_warn(&pdev->dev, "out of memory.\n"); 4418 return -1; 4419} 4420 4421static void free_hba(ctlr_info_t *h) 4422{ 4423 int i; 4424 4425 hba[h->ctlr] = NULL; 4426 for (i = 0; i < h->highest_lun + 1; i++) 4427 if (h->gendisk[i] != NULL) 4428 put_disk(h->gendisk[i]); 4429 kfree(h); 4430} 4431 4432/* Send a message CDB to the firmware. */ 4433static int cciss_message(struct pci_dev *pdev, unsigned char opcode, 4434 unsigned char type) 4435{ 4436 typedef struct { 4437 CommandListHeader_struct CommandHeader; 4438 RequestBlock_struct Request; 4439 ErrDescriptor_struct ErrorDescriptor; 4440 } Command; 4441 static const size_t cmd_sz = sizeof(Command) + sizeof(ErrorInfo_struct); 4442 Command *cmd; 4443 dma_addr_t paddr64; 4444 uint32_t paddr32, tag; 4445 void __iomem *vaddr; 4446 int i, err; 4447 4448 vaddr = ioremap_nocache(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); 4449 if (vaddr == NULL) 4450 return -ENOMEM; 4451 4452 /* The Inbound Post Queue only accepts 32-bit physical addresses for the 4453 CCISS commands, so they must be allocated from the lower 4GiB of 4454 memory. */ 4455 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 4456 if (err) { 4457 iounmap(vaddr); 4458 return -ENOMEM; 4459 } 4460 4461 cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64); 4462 if (cmd == NULL) { 4463 iounmap(vaddr); 4464 return -ENOMEM; 4465 } 4466 4467 /* This must fit, because of the 32-bit consistent DMA mask. Also, 4468 although there's no guarantee, we assume that the address is at 4469 least 4-byte aligned (most likely, it's page-aligned). 
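	   The 4-byte alignment also matters because the controller hands
	   the tag back with status encoded in the low bits: the polling
	   loop below masks them off with (tag & ~3) when matching, and
	   bit 1 set in the returned tag indicates an error.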
*/ 4470 paddr32 = paddr64; 4471 4472 cmd->CommandHeader.ReplyQueue = 0; 4473 cmd->CommandHeader.SGList = 0; 4474 cmd->CommandHeader.SGTotal = 0; 4475 cmd->CommandHeader.Tag.lower = paddr32; 4476 cmd->CommandHeader.Tag.upper = 0; 4477 memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8); 4478 4479 cmd->Request.CDBLen = 16; 4480 cmd->Request.Type.Type = TYPE_MSG; 4481 cmd->Request.Type.Attribute = ATTR_HEADOFQUEUE; 4482 cmd->Request.Type.Direction = XFER_NONE; 4483 cmd->Request.Timeout = 0; /* Don't time out */ 4484 cmd->Request.CDB[0] = opcode; 4485 cmd->Request.CDB[1] = type; 4486 memset(&cmd->Request.CDB[2], 0, 14); /* the rest of the CDB is reserved */ 4487 4488 cmd->ErrorDescriptor.Addr.lower = paddr32 + sizeof(Command); 4489 cmd->ErrorDescriptor.Addr.upper = 0; 4490 cmd->ErrorDescriptor.Len = sizeof(ErrorInfo_struct); 4491 4492 writel(paddr32, vaddr + SA5_REQUEST_PORT_OFFSET); 4493 4494 for (i = 0; i < 10; i++) { 4495 tag = readl(vaddr + SA5_REPLY_PORT_OFFSET); 4496 if ((tag & ~3) == paddr32) 4497 break; 4498 msleep(CCISS_POST_RESET_NOOP_TIMEOUT_MSECS); 4499 } 4500 4501 iounmap(vaddr); 4502 4503 /* we leak the DMA buffer here ... no choice since the controller could 4504 still complete the command. */ 4505 if (i == 10) { 4506 dev_err(&pdev->dev, 4507 "controller message %02x:%02x timed out\n", 4508 opcode, type); 4509 return -ETIMEDOUT; 4510 } 4511 4512 pci_free_consistent(pdev, cmd_sz, cmd, paddr64); 4513 4514 if (tag & 2) { 4515 dev_err(&pdev->dev, "controller message %02x:%02x failed\n", 4516 opcode, type); 4517 return -EIO; 4518 } 4519 4520 dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n", 4521 opcode, type); 4522 return 0; 4523} 4524 4525#define cciss_noop(p) cciss_message(p, 3, 0) 4526 4527static int cciss_controller_hard_reset(struct pci_dev *pdev, 4528 void * __iomem vaddr, u32 use_doorbell) 4529{ 4530 u16 pmcsr; 4531 int pos; 4532 4533 if (use_doorbell) { 4534 /* For everything after the P600, the PCI power state method 4535 * of resetting the controller doesn't work, so we have this 4536 * other way using the doorbell register. 4537 */ 4538 dev_info(&pdev->dev, "using doorbell to reset controller\n"); 4539 writel(use_doorbell, vaddr + SA5_DOORBELL); 4540 } else { /* Try to do it the PCI power state way */ 4541 4542 /* Quoting from the Open CISS Specification: "The Power 4543 * Management Control/Status Register (CSR) controls the power 4544 * state of the device. The normal operating state is D0, 4545 * CSR=00h. The software off state is D3, CSR=03h. To reset 4546 * the controller, place the interface device in D3 then to D0, 4547 * this causes a secondary PCI reset which will reset the 4548 * controller." */ 4549 4550 pos = pci_find_capability(pdev, PCI_CAP_ID_PM); 4551 if (pos == 0) { 4552 dev_err(&pdev->dev, 4553 "cciss_controller_hard_reset: " 4554 "PCI PM not supported\n"); 4555 return -ENODEV; 4556 } 4557 dev_info(&pdev->dev, "using PCI PM to reset controller\n"); 4558 /* enter the D3hot power management state */ 4559 pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr); 4560 pmcsr &= ~PCI_PM_CTRL_STATE_MASK; 4561 pmcsr |= PCI_D3hot; 4562 pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr); 4563 4564 msleep(500); 4565 4566 /* enter the D0 power management state */ 4567 pmcsr &= ~PCI_PM_CTRL_STATE_MASK; 4568 pmcsr |= PCI_D0; 4569 pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr); 4570 4571 /* 4572 * The P600 requires a small delay when changing states. 4573 * Otherwise we may think the board did not reset and we bail. 
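		 * (If we sample too early, the BOARD_NOT_READY wait in
		 * cciss_kdump_hard_reset_controller() times out and the
		 * caller falls back to a soft reset via -ENOTSUPP.)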
		 * This is for kdump only and is particular to the P600.
		 */
		msleep(500);
	}
	return 0;
}

static void init_driver_version(char *driver_version, int len)
{
	memset(driver_version, 0, len);
	strncpy(driver_version, "cciss " DRIVER_NAME, len - 1);
}

static int write_driver_ver_to_cfgtable(CfgTable_struct __iomem *cfgtable)
{
	char *driver_version;
	int i, size = sizeof(cfgtable->driver_version);

	driver_version = kmalloc(size, GFP_KERNEL);
	if (!driver_version)
		return -ENOMEM;

	init_driver_version(driver_version, size);
	for (i = 0; i < size; i++)
		writeb(driver_version[i], &cfgtable->driver_version[i]);
	kfree(driver_version);
	return 0;
}

static void read_driver_ver_from_cfgtable(CfgTable_struct __iomem *cfgtable,
	unsigned char *driver_ver)
{
	int i;

	for (i = 0; i < sizeof(cfgtable->driver_version); i++)
		driver_ver[i] = readb(&cfgtable->driver_version[i]);
}

static int controller_reset_failed(CfgTable_struct __iomem *cfgtable)
{
	char *driver_ver, *old_driver_ver;
	int rc, size = sizeof(cfgtable->driver_version);

	old_driver_ver = kmalloc(2 * size, GFP_KERNEL);
	if (!old_driver_ver)
		return -ENOMEM;
	driver_ver = old_driver_ver + size;

	/* After a reset, the 32 bytes of "driver version" in the cfgtable
	 * should have been changed, otherwise we know the reset failed.
	 */
	init_driver_version(old_driver_ver, size);
	read_driver_ver_from_cfgtable(cfgtable, driver_ver);
	rc = !memcmp(driver_ver, old_driver_ver, size);
	kfree(old_driver_ver);
	return rc;
}

/* This does a hard reset of the controller using PCI power management
 * states or using the doorbell register. */
static int cciss_kdump_hard_reset_controller(struct pci_dev *pdev)
{
	u64 cfg_offset;
	u32 cfg_base_addr;
	u64 cfg_base_addr_index;
	void __iomem *vaddr;
	unsigned long paddr;
	u32 misc_fw_support;
	int rc;
	CfgTable_struct __iomem *cfgtable;
	u32 use_doorbell;
	u32 board_id;
	u16 command_register;

	/* For controllers as old as the P600, this is very nearly
	 * the same thing as
	 *
	 * pci_save_state(pci_dev);
	 * pci_set_power_state(pci_dev, PCI_D3hot);
	 * pci_set_power_state(pci_dev, PCI_D0);
	 * pci_restore_state(pci_dev);
	 *
	 * For controllers newer than the P600, the PCI power state
	 * method of resetting doesn't work, so we have another way
	 * using the doorbell register.
	 */

	/* Exclude 640x boards. These are two PCI devices in one slot
	 * which share a battery backed cache module. One controls the
	 * cache, the other accesses the cache through the one that controls
	 * it. If we reset the one controlling the cache, the other will
	 * likely not be happy. Just forbid resetting this conjoined mess.
	 */
	cciss_lookup_board_id(pdev, &board_id);
	if (!ctlr_is_resettable(board_id)) {
		dev_warn(&pdev->dev, "Cannot reset Smart Array 640x "
			"due to shared cache module.\n");
		return -ENODEV;
	}

	/* if controller is soft- but not hard-resettable... */
	if (!ctlr_is_hard_resettable(board_id))
		return -ENOTSUPP; /* try soft reset later. */

	/* Save the PCI command register */
	pci_read_config_word(pdev, 4, &command_register);
	/* Turn the board off. This is so that later pci_restore_state()
	 * won't turn the board on before the rest of config space is ready.
	 */
	pci_disable_device(pdev);
	pci_save_state(pdev);

	/* find the first memory BAR, so we can find the cfg table */
	rc = cciss_pci_find_memory_BAR(pdev, &paddr);
	if (rc)
		return rc;
	vaddr = remap_pci_mem(paddr, 0x250);
	if (!vaddr)
		return -ENOMEM;

	/* find cfgtable in order to check if reset via doorbell is supported */
	rc = cciss_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
		&cfg_base_addr_index, &cfg_offset);
	if (rc)
		goto unmap_vaddr;
	cfgtable = remap_pci_mem(pci_resource_start(pdev,
		cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
	if (!cfgtable) {
		rc = -ENOMEM;
		goto unmap_vaddr;
	}
	rc = write_driver_ver_to_cfgtable(cfgtable);
	if (rc)
		goto unmap_cfgtable;

	/* If reset via doorbell register is supported, use that.
	 * There are two such methods. Favor the newest method.
	 */
	misc_fw_support = readl(&cfgtable->misc_fw_support);
	use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2;
	if (use_doorbell) {
		use_doorbell = DOORBELL_CTLR_RESET2;
	} else {
		use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
		if (use_doorbell) {
			dev_warn(&pdev->dev, "Controller claims that "
				"'Bit 2 doorbell reset' is "
				"supported, but not 'bit 5 doorbell reset'. "
				"Firmware update is recommended.\n");
			rc = -ENOTSUPP; /* use the soft reset */
			goto unmap_cfgtable;
		}
	}

	rc = cciss_controller_hard_reset(pdev, vaddr, use_doorbell);
	if (rc)
		goto unmap_cfgtable;
	pci_restore_state(pdev);
	rc = pci_enable_device(pdev);
	if (rc) {
		dev_warn(&pdev->dev, "failed to enable device.\n");
		goto unmap_cfgtable;
	}
	pci_write_config_word(pdev, 4, command_register);

	/* Some devices (notably the HP Smart Array 5i Controller)
	   need a little pause here */
	msleep(CCISS_POST_RESET_PAUSE_MSECS);

	/* Wait for board to become not ready, then ready. */
	dev_info(&pdev->dev, "Waiting for board to reset.\n");
	rc = cciss_wait_for_board_state(pdev, vaddr, BOARD_NOT_READY);
	if (rc) {
		dev_warn(&pdev->dev, "Failed waiting for board to hard reset."
			" Will try soft reset.\n");
		rc = -ENOTSUPP; /* Not expected, but try soft reset later */
		goto unmap_cfgtable;
	}
	rc = cciss_wait_for_board_state(pdev, vaddr, BOARD_READY);
	if (rc) {
		dev_warn(&pdev->dev,
			"failed waiting for board to become ready "
			"after hard reset\n");
		goto unmap_cfgtable;
	}

	rc = controller_reset_failed(cfgtable);
	if (rc < 0)
		goto unmap_cfgtable;
	if (rc) {
		dev_warn(&pdev->dev, "Unable to successfully hard reset "
			"controller.
Will try soft reset.\n"); 4767 rc = -ENOTSUPP; /* Not expected, but try soft reset later */ 4768 } else { 4769 dev_info(&pdev->dev, "Board ready after hard reset.\n"); 4770 } 4771 4772unmap_cfgtable: 4773 iounmap(cfgtable); 4774 4775unmap_vaddr: 4776 iounmap(vaddr); 4777 return rc; 4778} 4779 4780static int cciss_init_reset_devices(struct pci_dev *pdev) 4781{ 4782 int rc, i; 4783 4784 if (!reset_devices) 4785 return 0; 4786 4787 /* Reset the controller with a PCI power-cycle or via doorbell */ 4788 rc = cciss_kdump_hard_reset_controller(pdev); 4789 4790 /* -ENOTSUPP here means we cannot reset the controller 4791 * but it's already (and still) up and running in 4792 * "performant mode". Or, it might be 640x, which can't reset 4793 * due to concerns about shared bbwc between 6402/6404 pair. 4794 */ 4795 if (rc == -ENOTSUPP) 4796 return rc; /* just try to do the kdump anyhow. */ 4797 if (rc) 4798 return -ENODEV; 4799 4800 /* Now try to get the controller to respond to a no-op */ 4801 dev_warn(&pdev->dev, "Waiting for controller to respond to no-op\n"); 4802 for (i = 0; i < CCISS_POST_RESET_NOOP_RETRIES; i++) { 4803 if (cciss_noop(pdev) == 0) 4804 break; 4805 else 4806 dev_warn(&pdev->dev, "no-op failed%s\n", 4807 (i < CCISS_POST_RESET_NOOP_RETRIES - 1 ? 4808 "; re-trying" : "")); 4809 msleep(CCISS_POST_RESET_NOOP_INTERVAL_MSECS); 4810 } 4811 return 0; 4812} 4813 4814static int cciss_allocate_cmd_pool(ctlr_info_t *h) 4815{ 4816 h->cmd_pool_bits = kmalloc(BITS_TO_LONGS(h->nr_cmds) * 4817 sizeof(unsigned long), GFP_KERNEL); 4818 h->cmd_pool = pci_alloc_consistent(h->pdev, 4819 h->nr_cmds * sizeof(CommandList_struct), 4820 &(h->cmd_pool_dhandle)); 4821 h->errinfo_pool = pci_alloc_consistent(h->pdev, 4822 h->nr_cmds * sizeof(ErrorInfo_struct), 4823 &(h->errinfo_pool_dhandle)); 4824 if ((h->cmd_pool_bits == NULL) 4825 || (h->cmd_pool == NULL) 4826 || (h->errinfo_pool == NULL)) { 4827 dev_err(&h->pdev->dev, "out of memory"); 4828 return -ENOMEM; 4829 } 4830 return 0; 4831} 4832 4833static int cciss_allocate_scatterlists(ctlr_info_t *h) 4834{ 4835 int i; 4836 4837 /* zero it, so that on free we need not know how many were alloc'ed */ 4838 h->scatter_list = kzalloc(h->max_commands * 4839 sizeof(struct scatterlist *), GFP_KERNEL); 4840 if (!h->scatter_list) 4841 return -ENOMEM; 4842 4843 for (i = 0; i < h->nr_cmds; i++) { 4844 h->scatter_list[i] = kmalloc(sizeof(struct scatterlist) * 4845 h->maxsgentries, GFP_KERNEL); 4846 if (h->scatter_list[i] == NULL) { 4847 dev_err(&h->pdev->dev, "could not allocate " 4848 "s/g lists\n"); 4849 return -ENOMEM; 4850 } 4851 } 4852 return 0; 4853} 4854 4855static void cciss_free_scatterlists(ctlr_info_t *h) 4856{ 4857 int i; 4858 4859 if (h->scatter_list) { 4860 for (i = 0; i < h->nr_cmds; i++) 4861 kfree(h->scatter_list[i]); 4862 kfree(h->scatter_list); 4863 } 4864} 4865 4866static void cciss_free_cmd_pool(ctlr_info_t *h) 4867{ 4868 kfree(h->cmd_pool_bits); 4869 if (h->cmd_pool) 4870 pci_free_consistent(h->pdev, 4871 h->nr_cmds * sizeof(CommandList_struct), 4872 h->cmd_pool, h->cmd_pool_dhandle); 4873 if (h->errinfo_pool) 4874 pci_free_consistent(h->pdev, 4875 h->nr_cmds * sizeof(ErrorInfo_struct), 4876 h->errinfo_pool, h->errinfo_pool_dhandle); 4877} 4878 4879static int cciss_request_irq(ctlr_info_t *h, 4880 irqreturn_t (*msixhandler)(int, void *), 4881 irqreturn_t (*intxhandler)(int, void *)) 4882{ 4883 if (h->msix_vector || h->msi_vector) { 4884 if (!request_irq(h->intr[h->intr_mode], msixhandler, 4885 0, h->devname, h)) 4886 return 0; 4887 dev_err(&h->pdev->dev, 
"Unable to get msi irq %d" 4888 " for %s\n", h->intr[h->intr_mode], 4889 h->devname); 4890 return -1; 4891 } 4892 4893 if (!request_irq(h->intr[h->intr_mode], intxhandler, 4894 IRQF_SHARED, h->devname, h)) 4895 return 0; 4896 dev_err(&h->pdev->dev, "Unable to get irq %d for %s\n", 4897 h->intr[h->intr_mode], h->devname); 4898 return -1; 4899} 4900 4901static int cciss_kdump_soft_reset(ctlr_info_t *h) 4902{ 4903 if (cciss_send_reset(h, CTLR_LUNID, CCISS_RESET_TYPE_CONTROLLER)) { 4904 dev_warn(&h->pdev->dev, "Resetting array controller failed.\n"); 4905 return -EIO; 4906 } 4907 4908 dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n"); 4909 if (cciss_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY)) { 4910 dev_warn(&h->pdev->dev, "Soft reset had no effect.\n"); 4911 return -1; 4912 } 4913 4914 dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n"); 4915 if (cciss_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY)) { 4916 dev_warn(&h->pdev->dev, "Board failed to become ready " 4917 "after soft reset.\n"); 4918 return -1; 4919 } 4920 4921 return 0; 4922} 4923 4924static void cciss_undo_allocations_after_kdump_soft_reset(ctlr_info_t *h) 4925{ 4926 int ctlr = h->ctlr; 4927 4928 free_irq(h->intr[h->intr_mode], h); 4929#ifdef CONFIG_PCI_MSI 4930 if (h->msix_vector) 4931 pci_disable_msix(h->pdev); 4932 else if (h->msi_vector) 4933 pci_disable_msi(h->pdev); 4934#endif /* CONFIG_PCI_MSI */ 4935 cciss_free_sg_chain_blocks(h->cmd_sg_list, h->nr_cmds); 4936 cciss_free_scatterlists(h); 4937 cciss_free_cmd_pool(h); 4938 kfree(h->blockFetchTable); 4939 if (h->reply_pool) 4940 pci_free_consistent(h->pdev, h->max_commands * sizeof(__u64), 4941 h->reply_pool, h->reply_pool_dhandle); 4942 if (h->transtable) 4943 iounmap(h->transtable); 4944 if (h->cfgtable) 4945 iounmap(h->cfgtable); 4946 if (h->vaddr) 4947 iounmap(h->vaddr); 4948 unregister_blkdev(h->major, h->devname); 4949 cciss_destroy_hba_sysfs_entry(h); 4950 pci_release_regions(h->pdev); 4951 kfree(h); 4952 hba[ctlr] = NULL; 4953} 4954 4955/* 4956 * This is it. Find all the controllers and register them. I really hate 4957 * stealing all these major device numbers. 4958 * returns the number of block devices registered. 4959 */ 4960static int cciss_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 4961{ 4962 int i; 4963 int j = 0; 4964 int rc; 4965 int try_soft_reset = 0; 4966 int dac, return_code; 4967 InquiryData_struct *inq_buff; 4968 ctlr_info_t *h; 4969 unsigned long flags; 4970 4971 /* 4972 * By default the cciss driver is used for all older HP Smart Array 4973 * controllers. There are module paramaters that allow a user to 4974 * override this behavior and instead use the hpsa SCSI driver. If 4975 * this is the case cciss may be loaded first from the kdump initrd 4976 * image and cause a kernel panic. So if reset_devices is true and 4977 * cciss_allow_hpsa is set just bail. 4978 */ 4979 if ((reset_devices) && (cciss_allow_hpsa == 1)) 4980 return -ENODEV; 4981 rc = cciss_init_reset_devices(pdev); 4982 if (rc) { 4983 if (rc != -ENOTSUPP) 4984 return rc; 4985 /* If the reset fails in a particular way (it has no way to do 4986 * a proper hard reset, so returns -ENOTSUPP) we can try to do 4987 * a soft reset once we get the controller configured up to the 4988 * point that it can accept a command. 
4989 */ 4990 try_soft_reset = 1; 4991 rc = 0; 4992 } 4993 4994reinit_after_soft_reset: 4995 4996 i = alloc_cciss_hba(pdev); 4997 if (i < 0) 4998 return -ENOMEM; 4999 5000 h = hba[i]; 5001 h->pdev = pdev; 5002 h->busy_initializing = 1; 5003 h->intr_mode = cciss_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT; 5004 INIT_LIST_HEAD(&h->cmpQ); 5005 INIT_LIST_HEAD(&h->reqQ); 5006 mutex_init(&h->busy_shutting_down); 5007 5008 if (cciss_pci_init(h) != 0) 5009 goto clean_no_release_regions; 5010 5011 sprintf(h->devname, "cciss%d", i); 5012 h->ctlr = i; 5013 5014 if (cciss_tape_cmds < 2) 5015 cciss_tape_cmds = 2; 5016 if (cciss_tape_cmds > 16) 5017 cciss_tape_cmds = 16; 5018 5019 init_completion(&h->scan_wait); 5020 5021 if (cciss_create_hba_sysfs_entry(h)) 5022 goto clean0; 5023 5024 /* configure PCI DMA stuff */ 5025 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) 5026 dac = 1; 5027 else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) 5028 dac = 0; 5029 else { 5030 dev_err(&h->pdev->dev, "no suitable DMA available\n"); 5031 goto clean1; 5032 } 5033 5034 /* 5035 * register with the major number, or get a dynamic major number 5036 * by passing 0 as argument. This is done for greater than 5037 * 8 controller support. 5038 */ 5039 if (i < MAX_CTLR_ORIG) 5040 h->major = COMPAQ_CISS_MAJOR + i; 5041 rc = register_blkdev(h->major, h->devname); 5042 if (rc == -EBUSY || rc == -EINVAL) { 5043 dev_err(&h->pdev->dev, 5044 "Unable to get major number %d for %s " 5045 "on hba %d\n", h->major, h->devname, i); 5046 goto clean1; 5047 } else { 5048 if (i >= MAX_CTLR_ORIG) 5049 h->major = rc; 5050 } 5051 5052 /* make sure the board interrupts are off */ 5053 h->access.set_intr_mask(h, CCISS_INTR_OFF); 5054 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx); 5055 if (rc) 5056 goto clean2; 5057 5058 dev_info(&h->pdev->dev, "%s: <0x%x> at PCI %s IRQ %d%s using DAC\n", 5059 h->devname, pdev->device, pci_name(pdev), 5060 h->intr[h->intr_mode], dac ? "" : " not"); 5061 5062 if (cciss_allocate_cmd_pool(h)) 5063 goto clean4; 5064 5065 if (cciss_allocate_scatterlists(h)) 5066 goto clean4; 5067 5068 h->cmd_sg_list = cciss_allocate_sg_chain_blocks(h, 5069 h->chainsize, h->nr_cmds); 5070 if (!h->cmd_sg_list && h->chainsize > 0) 5071 goto clean4; 5072 5073 spin_lock_init(&h->lock); 5074 5075 /* Initialize the pdev driver private data. 5076 have it point to h. */ 5077 pci_set_drvdata(pdev, h); 5078 /* command and error info recs zeroed out before 5079 they are used */ 5080 bitmap_zero(h->cmd_pool_bits, h->nr_cmds); 5081 5082 h->num_luns = 0; 5083 h->highest_lun = -1; 5084 for (j = 0; j < CISS_MAX_LUN; j++) { 5085 h->drv[j] = NULL; 5086 h->gendisk[j] = NULL; 5087 } 5088 5089 /* At this point, the controller is ready to take commands. 5090 * Now, if reset_devices and the hard reset didn't work, try 5091 * the soft reset and see if that works. 5092 */ 5093 if (try_soft_reset) { 5094 5095 /* This is kind of gross. We may or may not get a completion 5096 * from the soft reset command, and if we do, then the value 5097 * from the fifo may or may not be valid. So, we wait 10 secs 5098 * after the reset throwing away any completions we get during 5099 * that time. Unregister the interrupt handler and register 5100 * fake ones to scoop up any residual completions. 
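		 * (The "fake" handlers are cciss_msix_discard_completions()
		 * and cciss_intx_discard_completions() above, which drain
		 * the FIFO without completing any commands.)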
5101 */ 5102 spin_lock_irqsave(&h->lock, flags); 5103 h->access.set_intr_mask(h, CCISS_INTR_OFF); 5104 spin_unlock_irqrestore(&h->lock, flags); 5105 free_irq(h->intr[h->intr_mode], h); 5106 rc = cciss_request_irq(h, cciss_msix_discard_completions, 5107 cciss_intx_discard_completions); 5108 if (rc) { 5109 dev_warn(&h->pdev->dev, "Failed to request_irq after " 5110 "soft reset.\n"); 5111 goto clean4; 5112 } 5113 5114 rc = cciss_kdump_soft_reset(h); 5115 if (rc) { 5116 dev_warn(&h->pdev->dev, "Soft reset failed.\n"); 5117 goto clean4; 5118 } 5119 5120 dev_info(&h->pdev->dev, "Board READY.\n"); 5121 dev_info(&h->pdev->dev, 5122 "Waiting for stale completions to drain.\n"); 5123 h->access.set_intr_mask(h, CCISS_INTR_ON); 5124 msleep(10000); 5125 h->access.set_intr_mask(h, CCISS_INTR_OFF); 5126 5127 rc = controller_reset_failed(h->cfgtable); 5128 if (rc) 5129 dev_info(&h->pdev->dev, 5130 "Soft reset appears to have failed.\n"); 5131 5132 /* since the controller's reset, we have to go back and re-init 5133 * everything. Easiest to just forget what we've done and do it 5134 * all over again. 5135 */ 5136 cciss_undo_allocations_after_kdump_soft_reset(h); 5137 try_soft_reset = 0; 5138 if (rc) 5139 /* don't go to clean4, we already unallocated */ 5140 return -ENODEV; 5141 5142 goto reinit_after_soft_reset; 5143 } 5144 5145 cciss_scsi_setup(h); 5146 5147 /* Turn the interrupts on so we can service requests */ 5148 h->access.set_intr_mask(h, CCISS_INTR_ON); 5149 5150 /* Get the firmware version */ 5151 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL); 5152 if (inq_buff == NULL) { 5153 dev_err(&h->pdev->dev, "out of memory\n"); 5154 goto clean4; 5155 } 5156 5157 return_code = sendcmd_withirq(h, CISS_INQUIRY, inq_buff, 5158 sizeof(InquiryData_struct), 0, CTLR_LUNID, TYPE_CMD); 5159 if (return_code == IO_OK) { 5160 h->firm_ver[0] = inq_buff->data_byte[32]; 5161 h->firm_ver[1] = inq_buff->data_byte[33]; 5162 h->firm_ver[2] = inq_buff->data_byte[34]; 5163 h->firm_ver[3] = inq_buff->data_byte[35]; 5164 } else { /* send command failed */ 5165 dev_warn(&h->pdev->dev, "unable to determine firmware" 5166 " version of controller\n"); 5167 } 5168 kfree(inq_buff); 5169 5170 cciss_procinit(h); 5171 5172 h->cciss_max_sectors = 8192; 5173 5174 rebuild_lun_table(h, 1, 0); 5175 cciss_engage_scsi(h); 5176 h->busy_initializing = 0; 5177 return 0; 5178 5179clean4: 5180 cciss_free_cmd_pool(h); 5181 cciss_free_scatterlists(h); 5182 cciss_free_sg_chain_blocks(h->cmd_sg_list, h->nr_cmds); 5183 free_irq(h->intr[h->intr_mode], h); 5184clean2: 5185 unregister_blkdev(h->major, h->devname); 5186clean1: 5187 cciss_destroy_hba_sysfs_entry(h); 5188clean0: 5189 pci_release_regions(pdev); 5190clean_no_release_regions: 5191 h->busy_initializing = 0; 5192 5193 /* 5194 * Deliberately omit pci_disable_device(): it does something nasty to 5195 * Smart Array controllers that pci_enable_device does not undo 5196 */ 5197 pci_set_drvdata(pdev, NULL); 5198 free_hba(h); 5199 return -ENODEV; 5200} 5201 5202static void cciss_shutdown(struct pci_dev *pdev) 5203{ 5204 ctlr_info_t *h; 5205 char *flush_buf; 5206 int return_code; 5207 5208 h = pci_get_drvdata(pdev); 5209 flush_buf = kzalloc(4, GFP_KERNEL); 5210 if (!flush_buf) { 5211 dev_warn(&h->pdev->dev, "cache not flushed, out of memory.\n"); 5212 return; 5213 } 5214 /* write all data in the battery backed cache to disk */ 5215 return_code = sendcmd_withirq(h, CCISS_CACHE_FLUSH, flush_buf, 5216 4, 0, CTLR_LUNID, TYPE_CMD); 5217 kfree(flush_buf); 5218 if (return_code != IO_OK) 5219 
		dev_warn(&h->pdev->dev, "Error flushing cache\n");
	h->access.set_intr_mask(h, CCISS_INTR_OFF);
	free_irq(h->intr[h->intr_mode], h);
}

static int cciss_enter_simple_mode(struct ctlr_info *h)
{
	u32 trans_support;

	trans_support = readl(&(h->cfgtable->TransportSupport));
	if (!(trans_support & SIMPLE_MODE))
		return -ENOTSUPP;

	h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
	writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	cciss_wait_for_mode_change_ack(h);
	print_cfg_table(h);
	if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
		dev_warn(&h->pdev->dev, "unable to get board into simple mode\n");
		return -ENODEV;
	}
	h->transMethod = CFGTBL_Trans_Simple;
	return 0;
}

static void cciss_remove_one(struct pci_dev *pdev)
{
	ctlr_info_t *h;
	int i, j;

	if (pci_get_drvdata(pdev) == NULL) {
		dev_err(&pdev->dev, "Unable to remove device\n");
		return;
	}

	h = pci_get_drvdata(pdev);
	i = h->ctlr;
	if (hba[i] == NULL) {
		dev_err(&pdev->dev, "device appears to already be removed\n");
		return;
	}

	mutex_lock(&h->busy_shutting_down);

	remove_from_scan_list(h);
	remove_proc_entry(h->devname, proc_cciss);
	unregister_blkdev(h->major, h->devname);

	/* remove it from the disk list */
	for (j = 0; j < CISS_MAX_LUN; j++) {
		struct gendisk *disk = h->gendisk[j];
		if (disk) {
			struct request_queue *q = disk->queue;

			if (disk->flags & GENHD_FL_UP) {
				cciss_destroy_ld_sysfs_entry(h, j, 1);
				del_gendisk(disk);
			}
			if (q)
				blk_cleanup_queue(q);
		}
	}

#ifdef CONFIG_CISS_SCSI_TAPE
	cciss_unregister_scsi(h); /* unhook from SCSI subsystem */
#endif

	cciss_shutdown(pdev);

#ifdef CONFIG_PCI_MSI
	if (h->msix_vector)
		pci_disable_msix(h->pdev);
	else if (h->msi_vector)
		pci_disable_msi(h->pdev);
#endif /* CONFIG_PCI_MSI */

	iounmap(h->transtable);
	iounmap(h->cfgtable);
	iounmap(h->vaddr);

	cciss_free_cmd_pool(h);
	/* Free up sg elements */
	for (j = 0; j < h->nr_cmds; j++)
		kfree(h->scatter_list[j]);
	kfree(h->scatter_list);
	cciss_free_sg_chain_blocks(h->cmd_sg_list, h->nr_cmds);
	kfree(h->blockFetchTable);
	if (h->reply_pool)
		pci_free_consistent(h->pdev, h->max_commands * sizeof(__u64),
			h->reply_pool, h->reply_pool_dhandle);
	/*
	 * Deliberately omit pci_disable_device(): it does something nasty to
	 * Smart Array controllers that pci_enable_device does not undo
	 */
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	cciss_destroy_hba_sysfs_entry(h);
	mutex_unlock(&h->busy_shutting_down);
	free_hba(h);
}

static struct pci_driver cciss_pci_driver = {
	.name = "cciss",
	.probe = cciss_init_one,
	.remove = cciss_remove_one,
	.id_table = cciss_pci_device_id,
	.shutdown = cciss_shutdown,
};

/*
 * This is it. Register the PCI driver information for the cards we control;
 * the OS will call our registered routines when it finds one of our cards.
 */
static int __init cciss_init(void)
{
	int err;

	/*
	 * The hardware requires that commands are aligned on a 64-bit
	 * boundary. Given that we use pci_alloc_consistent() to allocate an
	 * array of them, the size must be a multiple of 8 bytes.
	 */
	BUILD_BUG_ON(sizeof(CommandList_struct) % COMMANDLIST_ALIGNMENT);
	printk(KERN_INFO DRIVER_NAME "\n");

	err = bus_register(&cciss_bus_type);
	if (err)
		return err;

	/* Start the scan thread */
	cciss_scan_thread = kthread_run(scan_thread, NULL, "cciss_scan");
	if (IS_ERR(cciss_scan_thread)) {
		err = PTR_ERR(cciss_scan_thread);
		goto err_bus_unregister;
	}

	/* Register for our PCI devices */
	err = pci_register_driver(&cciss_pci_driver);
	if (err)
		goto err_thread_stop;

	return err;

err_thread_stop:
	kthread_stop(cciss_scan_thread);
err_bus_unregister:
	bus_unregister(&cciss_bus_type);

	return err;
}

static void __exit cciss_cleanup(void)
{
	int i;

	pci_unregister_driver(&cciss_pci_driver);
	/* double check that all controller entries have been removed */
	for (i = 0; i < MAX_CTLR; i++) {
		if (hba[i] != NULL) {
			dev_warn(&hba[i]->pdev->dev,
				"had to remove controller\n");
			cciss_remove_one(hba[i]->pdev);
		}
	}
	kthread_stop(cciss_scan_thread);
	if (proc_cciss)
		remove_proc_entry("driver/cciss", NULL);
	bus_unregister(&cciss_bus_type);
}

module_init(cciss_init);
module_exit(cciss_cleanup);