1 /***************************************************************************
2                           dpti.c  -  description
3                              -------------------
4     begin                : Thu Sep 7 2000
5     copyright            : (C) 2000 by Adaptec
6 
7 			   July 30, 2001 First version being submitted
8 			   for inclusion in the kernel.  V2.4
9 
10     See Documentation/scsi/dpti.txt for history, notes, license info
11     and credits
12  ***************************************************************************/
13 
14 /***************************************************************************
15  *                                                                         *
16  *   This program is free software; you can redistribute it and/or modify  *
17  *   it under the terms of the GNU General Public License as published by  *
18  *   the Free Software Foundation; either version 2 of the License, or     *
19  *   (at your option) any later version.                                   *
20  *                                                                         *
21  ***************************************************************************/
22 /***************************************************************************
23  * Sat Dec 20 2003 Go Taniguchi <go@turbolinux.co.jp>
24  - Support 2.6 kernel and DMA-mapping
25  - ioctl fix for raid tools
26  - use schedule_timeout in long long loop
27  **************************************************************************/
28 
29 /*#define DEBUG 1 */
30 /*#define UARTDELAY 1 */
31 
32 #include <linux/module.h>
33 
34 MODULE_AUTHOR("Deanna Bonds, with _lots_ of help from Mark Salyzyn");
35 MODULE_DESCRIPTION("Adaptec I2O RAID Driver");
36 
37 ////////////////////////////////////////////////////////////////
38 
39 #include <linux/ioctl.h>	/* For SCSI-Passthrough */
40 #include <asm/uaccess.h>
41 
42 #include <linux/stat.h>
43 #include <linux/slab.h>		/* for kmalloc() */
44 #include <linux/pci.h>		/* for PCI support */
45 #include <linux/proc_fs.h>
46 #include <linux/blkdev.h>
47 #include <linux/delay.h>	/* for udelay */
48 #include <linux/interrupt.h>
49 #include <linux/kernel.h>	/* for printk */
50 #include <linux/sched.h>
51 #include <linux/reboot.h>
52 #include <linux/spinlock.h>
53 #include <linux/dma-mapping.h>
54 
55 #include <linux/timer.h>
56 #include <linux/string.h>
57 #include <linux/ioport.h>
58 #include <linux/mutex.h>
59 
60 #include <asm/processor.h>	/* for boot_cpu_data */
61 #include <asm/pgtable.h>
62 #include <asm/io.h>		/* for virt_to_bus, etc. */
63 
64 #include <scsi/scsi.h>
65 #include <scsi/scsi_cmnd.h>
66 #include <scsi/scsi_device.h>
67 #include <scsi/scsi_host.h>
68 #include <scsi/scsi_tcq.h>
69 
70 #include "dpt/dptsig.h"
71 #include "dpti.h"
72 
73 /*============================================================================
74  * Create a binary signature - this is read by dptsig
75  * Needed for our management apps
76  *============================================================================
77  */
78 static DEFINE_MUTEX(adpt_mutex);
79 static dpt_sig_S DPTI_sig = {
80 	{'d', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION,
81 #ifdef __i386__
82 	PROC_INTEL, PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM,
83 #elif defined(__ia64__)
84 	PROC_INTEL, PROC_IA64,
85 #elif defined(__sparc__)
86 	PROC_ULTRASPARC, PROC_ULTRASPARC,
87 #elif defined(__alpha__)
88 	PROC_ALPHA, PROC_ALPHA,
89 #else
90 	(-1),(-1),
91 #endif
92 	 FT_HBADRVR, 0, OEM_DPT, OS_LINUX, CAP_OVERLAP, DEV_ALL,
93 	ADF_ALL_SC5, 0, 0, DPT_VERSION, DPT_REVISION, DPT_SUBREVISION,
94 	DPT_MONTH, DPT_DAY, DPT_YEAR, "Adaptec Linux I2O RAID Driver"
95 };
96 
97 
98 
99 
100 /*============================================================================
101  * Globals
102  *============================================================================
103  */
104 
105 static DEFINE_MUTEX(adpt_configuration_lock);
106 
107 static struct i2o_sys_tbl *sys_tbl;
108 static dma_addr_t sys_tbl_pa;
109 static int sys_tbl_ind;
110 static int sys_tbl_len;
111 
112 static adpt_hba* hba_chain = NULL;
113 static int hba_count = 0;
114 
115 static struct class *adpt_sysfs_class;
116 
117 static long adpt_unlocked_ioctl(struct file *, unsigned int, unsigned long);
118 #ifdef CONFIG_COMPAT
119 static long compat_adpt_ioctl(struct file *, unsigned int, unsigned long);
120 #endif
121 
122 static const struct file_operations adpt_fops = {
123 	.unlocked_ioctl	= adpt_unlocked_ioctl,
124 	.open		= adpt_open,
125 	.release	= adpt_close,
126 #ifdef CONFIG_COMPAT
127 	.compat_ioctl	= compat_adpt_ioctl,
128 #endif
129 	.llseek		= noop_llseek,
130 };
131 
132 /* Structures and definitions for synchronous message posting.
133  * See adpt_i2o_post_wait() for description
134  * */
135 struct adpt_i2o_post_wait_data
136 {
137 	int status;
138 	u32 id;
139 	adpt_wait_queue_head_t *wq;
140 	struct adpt_i2o_post_wait_data *next;
141 };
142 
143 static struct adpt_i2o_post_wait_data *adpt_post_wait_queue = NULL;
144 static u32 adpt_post_wait_id = 0;
145 static DEFINE_SPINLOCK(adpt_post_wait_lock);
146 
147 
148 /*============================================================================
149  * 				Functions
150  *============================================================================
151  */
152 
dpt_dma64(adpt_hba * pHba)153 static inline int dpt_dma64(adpt_hba *pHba)
154 {
155 	return (sizeof(dma_addr_t) > 4 && (pHba)->dma64);
156 }
157 
dma_high(dma_addr_t addr)158 static inline u32 dma_high(dma_addr_t addr)
159 {
160 	return upper_32_bits(addr);
161 }
162 
dma_low(dma_addr_t addr)163 static inline u32 dma_low(dma_addr_t addr)
164 {
165 	return (u32)addr;
166 }
167 
adpt_read_blink_led(adpt_hba * host)168 static u8 adpt_read_blink_led(adpt_hba* host)
169 {
170 	if (host->FwDebugBLEDflag_P) {
171 		if( readb(host->FwDebugBLEDflag_P) == 0xbc ){
172 			return readb(host->FwDebugBLEDvalue_P);
173 		}
174 	}
175 	return 0;
176 }
177 
178 /*============================================================================
179  * Scsi host template interface functions
180  *============================================================================
181  */
182 
183 static struct pci_device_id dptids[] = {
184 	{ PCI_DPT_VENDOR_ID, PCI_DPT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
185 	{ PCI_DPT_VENDOR_ID, PCI_DPT_RAPTOR_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
186 	{ 0, }
187 };
188 MODULE_DEVICE_TABLE(pci,dptids);
189 
adpt_detect(struct scsi_host_template * sht)190 static int adpt_detect(struct scsi_host_template* sht)
191 {
192 	struct pci_dev *pDev = NULL;
193 	adpt_hba *pHba;
194 	adpt_hba *next;
195 
196 	PINFO("Detecting Adaptec I2O RAID controllers...\n");
197 
198         /* search for all Adatpec I2O RAID cards */
199 	while ((pDev = pci_get_device( PCI_DPT_VENDOR_ID, PCI_ANY_ID, pDev))) {
200 		if(pDev->device == PCI_DPT_DEVICE_ID ||
201 		   pDev->device == PCI_DPT_RAPTOR_DEVICE_ID){
202 			if(adpt_install_hba(sht, pDev) ){
203 				PERROR("Could not Init an I2O RAID device\n");
204 				PERROR("Will not try to detect others.\n");
205 				return hba_count-1;
206 			}
207 			pci_dev_get(pDev);
208 		}
209 	}
210 
211 	/* In INIT state, Activate IOPs */
212 	for (pHba = hba_chain; pHba; pHba = next) {
213 		next = pHba->next;
214 		// Activate does get status , init outbound, and get hrt
215 		if (adpt_i2o_activate_hba(pHba) < 0) {
216 			adpt_i2o_delete_hba(pHba);
217 		}
218 	}
219 
220 
221 	/* Active IOPs in HOLD state */
222 
223 rebuild_sys_tab:
224 	if (hba_chain == NULL)
225 		return 0;
226 
227 	/*
228 	 * If build_sys_table fails, we kill everything and bail
229 	 * as we can't init the IOPs w/o a system table
230 	 */
231 	if (adpt_i2o_build_sys_table() < 0) {
232 		adpt_i2o_sys_shutdown();
233 		return 0;
234 	}
235 
236 	PDEBUG("HBA's in HOLD state\n");
237 
238 	/* If IOP don't get online, we need to rebuild the System table */
239 	for (pHba = hba_chain; pHba; pHba = pHba->next) {
240 		if (adpt_i2o_online_hba(pHba) < 0) {
241 			adpt_i2o_delete_hba(pHba);
242 			goto rebuild_sys_tab;
243 		}
244 	}
245 
246 	/* Active IOPs now in OPERATIONAL state */
247 	PDEBUG("HBA's in OPERATIONAL state\n");
248 
249 	printk("dpti: If you have a lot of devices this could take a few minutes.\n");
250 	for (pHba = hba_chain; pHba; pHba = next) {
251 		next = pHba->next;
252 		printk(KERN_INFO"%s: Reading the hardware resource table.\n", pHba->name);
253 		if (adpt_i2o_lct_get(pHba) < 0){
254 			adpt_i2o_delete_hba(pHba);
255 			continue;
256 		}
257 
258 		if (adpt_i2o_parse_lct(pHba) < 0){
259 			adpt_i2o_delete_hba(pHba);
260 			continue;
261 		}
262 		adpt_inquiry(pHba);
263 	}
264 
265 	adpt_sysfs_class = class_create(THIS_MODULE, "dpt_i2o");
266 	if (IS_ERR(adpt_sysfs_class)) {
267 		printk(KERN_WARNING"dpti: unable to create dpt_i2o class\n");
268 		adpt_sysfs_class = NULL;
269 	}
270 
271 	for (pHba = hba_chain; pHba; pHba = next) {
272 		next = pHba->next;
273 		if (adpt_scsi_host_alloc(pHba, sht) < 0){
274 			adpt_i2o_delete_hba(pHba);
275 			continue;
276 		}
277 		pHba->initialized = TRUE;
278 		pHba->state &= ~DPTI_STATE_RESET;
279 		if (adpt_sysfs_class) {
280 			struct device *dev = device_create(adpt_sysfs_class,
281 				NULL, MKDEV(DPTI_I2O_MAJOR, pHba->unit), NULL,
282 				"dpti%d", pHba->unit);
283 			if (IS_ERR(dev)) {
284 				printk(KERN_WARNING"dpti%d: unable to "
285 					"create device in dpt_i2o class\n",
286 					pHba->unit);
287 			}
288 		}
289 	}
290 
291 	// Register our control device node
292 	// nodes will need to be created in /dev to access this
293 	// the nodes can not be created from within the driver
294 	if (hba_count && register_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER, &adpt_fops)) {
295 		adpt_i2o_sys_shutdown();
296 		return 0;
297 	}
298 	return hba_count;
299 }
300 
301 
302 /*
303  * scsi_unregister will be called AFTER we return.
304  */
adpt_release(struct Scsi_Host * host)305 static int adpt_release(struct Scsi_Host *host)
306 {
307 	adpt_hba* pHba = (adpt_hba*) host->hostdata[0];
308 //	adpt_i2o_quiesce_hba(pHba);
309 	adpt_i2o_delete_hba(pHba);
310 	scsi_unregister(host);
311 	return 0;
312 }
313 
314 
adpt_inquiry(adpt_hba * pHba)315 static void adpt_inquiry(adpt_hba* pHba)
316 {
317 	u32 msg[17];
318 	u32 *mptr;
319 	u32 *lenptr;
320 	int direction;
321 	int scsidir;
322 	u32 len;
323 	u32 reqlen;
324 	u8* buf;
325 	dma_addr_t addr;
326 	u8  scb[16];
327 	s32 rcode;
328 
329 	memset(msg, 0, sizeof(msg));
330 	buf = dma_alloc_coherent(&pHba->pDev->dev, 80, &addr, GFP_KERNEL);
331 	if(!buf){
332 		printk(KERN_ERR"%s: Could not allocate buffer\n",pHba->name);
333 		return;
334 	}
335 	memset((void*)buf, 0, 36);
336 
337 	len = 36;
338 	direction = 0x00000000;
339 	scsidir  =0x40000000;	// DATA IN  (iop<--dev)
340 
341 	if (dpt_dma64(pHba))
342 		reqlen = 17;		// SINGLE SGE, 64 bit
343 	else
344 		reqlen = 14;		// SINGLE SGE, 32 bit
345 	/* Stick the headers on */
346 	msg[0] = reqlen<<16 | SGL_OFFSET_12;
347 	msg[1] = (0xff<<24|HOST_TID<<12|ADAPTER_TID);
348 	msg[2] = 0;
349 	msg[3]  = 0;
350 	// Adaptec/DPT Private stuff
351 	msg[4] = I2O_CMD_SCSI_EXEC|DPT_ORGANIZATION_ID<<16;
352 	msg[5] = ADAPTER_TID | 1<<16 /* Interpret*/;
353 	/* Direction, disconnect ok | sense data | simple queue , CDBLen */
354 	// I2O_SCB_FLAG_ENABLE_DISCONNECT |
355 	// I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
356 	// I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
357 	msg[6] = scsidir|0x20a00000| 6 /* cmd len*/;
358 
359 	mptr=msg+7;
360 
361 	memset(scb, 0, sizeof(scb));
362 	// Write SCSI command into the message - always 16 byte block
363 	scb[0] = INQUIRY;
364 	scb[1] = 0;
365 	scb[2] = 0;
366 	scb[3] = 0;
367 	scb[4] = 36;
368 	scb[5] = 0;
369 	// Don't care about the rest of scb
370 
371 	memcpy(mptr, scb, sizeof(scb));
372 	mptr+=4;
373 	lenptr=mptr++;		/* Remember me - fill in when we know */
374 
375 	/* Now fill in the SGList and command */
376 	*lenptr = len;
377 	if (dpt_dma64(pHba)) {
378 		*mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
379 		*mptr++ = 1 << PAGE_SHIFT;
380 		*mptr++ = 0xD0000000|direction|len;
381 		*mptr++ = dma_low(addr);
382 		*mptr++ = dma_high(addr);
383 	} else {
384 		*mptr++ = 0xD0000000|direction|len;
385 		*mptr++ = addr;
386 	}
387 
388 	// Send it on it's way
389 	rcode = adpt_i2o_post_wait(pHba, msg, reqlen<<2, 120);
390 	if (rcode != 0) {
391 		sprintf(pHba->detail, "Adaptec I2O RAID");
392 		printk(KERN_INFO "%s: Inquiry Error (%d)\n",pHba->name,rcode);
393 		if (rcode != -ETIME && rcode != -EINTR)
394 			dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
395 	} else {
396 		memset(pHba->detail, 0, sizeof(pHba->detail));
397 		memcpy(&(pHba->detail), "Vendor: Adaptec ", 16);
398 		memcpy(&(pHba->detail[16]), " Model: ", 8);
399 		memcpy(&(pHba->detail[24]), (u8*) &buf[16], 16);
400 		memcpy(&(pHba->detail[40]), " FW: ", 4);
401 		memcpy(&(pHba->detail[44]), (u8*) &buf[32], 4);
402 		pHba->detail[48] = '\0';	/* precautionary */
403 		dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
404 	}
405 	adpt_i2o_status_get(pHba);
406 	return ;
407 }
408 
409 
adpt_slave_configure(struct scsi_device * device)410 static int adpt_slave_configure(struct scsi_device * device)
411 {
412 	struct Scsi_Host *host = device->host;
413 	adpt_hba* pHba;
414 
415 	pHba = (adpt_hba *) host->hostdata[0];
416 
417 	if (host->can_queue && device->tagged_supported) {
418 		scsi_change_queue_depth(device,
419 				host->can_queue - 1);
420 	}
421 	return 0;
422 }
423 
adpt_queue_lck(struct scsi_cmnd * cmd,void (* done)(struct scsi_cmnd *))424 static int adpt_queue_lck(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *))
425 {
426 	adpt_hba* pHba = NULL;
427 	struct adpt_device* pDev = NULL;	/* dpt per device information */
428 
429 	cmd->scsi_done = done;
430 	/*
431 	 * SCSI REQUEST_SENSE commands will be executed automatically by the
432 	 * Host Adapter for any errors, so they should not be executed
433 	 * explicitly unless the Sense Data is zero indicating that no error
434 	 * occurred.
435 	 */
436 
437 	if ((cmd->cmnd[0] == REQUEST_SENSE) && (cmd->sense_buffer[0] != 0)) {
438 		cmd->result = (DID_OK << 16);
439 		cmd->scsi_done(cmd);
440 		return 0;
441 	}
442 
443 	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
444 	if (!pHba) {
445 		return FAILED;
446 	}
447 
448 	rmb();
449 	if ((pHba->state) & DPTI_STATE_RESET)
450 		return SCSI_MLQUEUE_HOST_BUSY;
451 
452 	// TODO if the cmd->device if offline then I may need to issue a bus rescan
453 	// followed by a get_lct to see if the device is there anymore
454 	if((pDev = (struct adpt_device*) (cmd->device->hostdata)) == NULL) {
455 		/*
456 		 * First command request for this device.  Set up a pointer
457 		 * to the device structure.  This should be a TEST_UNIT_READY
458 		 * command from scan_scsis_single.
459 		 */
460 		if ((pDev = adpt_find_device(pHba, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun)) == NULL) {
461 			// TODO: if any luns are at this bus, scsi id then fake a TEST_UNIT_READY and INQUIRY response
462 			// with type 7F (for all luns less than the max for this bus,id) so the lun scan will continue.
463 			cmd->result = (DID_NO_CONNECT << 16);
464 			cmd->scsi_done(cmd);
465 			return 0;
466 		}
467 		cmd->device->hostdata = pDev;
468 	}
469 	pDev->pScsi_dev = cmd->device;
470 
471 	/*
472 	 * If we are being called from when the device is being reset,
473 	 * delay processing of the command until later.
474 	 */
475 	if (pDev->state & DPTI_DEV_RESET ) {
476 		return FAILED;
477 	}
478 	return adpt_scsi_to_i2o(pHba, cmd, pDev);
479 }
480 
DEF_SCSI_QCMD(adpt_queue)481 static DEF_SCSI_QCMD(adpt_queue)
482 
483 static int adpt_bios_param(struct scsi_device *sdev, struct block_device *dev,
484 		sector_t capacity, int geom[])
485 {
486 	int heads=-1;
487 	int sectors=-1;
488 	int cylinders=-1;
489 
490 	// *** First lets set the default geometry ****
491 
492 	// If the capacity is less than ox2000
493 	if (capacity < 0x2000 ) {	// floppy
494 		heads = 18;
495 		sectors = 2;
496 	}
497 	// else if between 0x2000 and 0x20000
498 	else if (capacity < 0x20000) {
499 		heads = 64;
500 		sectors = 32;
501 	}
502 	// else if between 0x20000 and 0x40000
503 	else if (capacity < 0x40000) {
504 		heads = 65;
505 		sectors = 63;
506 	}
507 	// else if between 0x4000 and 0x80000
508 	else if (capacity < 0x80000) {
509 		heads = 128;
510 		sectors = 63;
511 	}
512 	// else if greater than 0x80000
513 	else {
514 		heads = 255;
515 		sectors = 63;
516 	}
517 	cylinders = sector_div(capacity, heads * sectors);
518 
519 	// Special case if CDROM
520 	if(sdev->type == 5) {  // CDROM
521 		heads = 252;
522 		sectors = 63;
523 		cylinders = 1111;
524 	}
525 
526 	geom[0] = heads;
527 	geom[1] = sectors;
528 	geom[2] = cylinders;
529 
530 	PDEBUG("adpt_bios_param: exit\n");
531 	return 0;
532 }
533 
534 
adpt_info(struct Scsi_Host * host)535 static const char *adpt_info(struct Scsi_Host *host)
536 {
537 	adpt_hba* pHba;
538 
539 	pHba = (adpt_hba *) host->hostdata[0];
540 	return (char *) (pHba->detail);
541 }
542 
adpt_show_info(struct seq_file * m,struct Scsi_Host * host)543 static int adpt_show_info(struct seq_file *m, struct Scsi_Host *host)
544 {
545 	struct adpt_device* d;
546 	int id;
547 	int chan;
548 	adpt_hba* pHba;
549 	int unit;
550 
551 	// Find HBA (host bus adapter) we are looking for
552 	mutex_lock(&adpt_configuration_lock);
553 	for (pHba = hba_chain; pHba; pHba = pHba->next) {
554 		if (pHba->host == host) {
555 			break;	/* found adapter */
556 		}
557 	}
558 	mutex_unlock(&adpt_configuration_lock);
559 	if (pHba == NULL) {
560 		return 0;
561 	}
562 	host = pHba->host;
563 
564 	seq_printf(m, "Adaptec I2O RAID Driver Version: %s\n\n", DPT_I2O_VERSION);
565 	seq_printf(m, "%s\n", pHba->detail);
566 	seq_printf(m, "SCSI Host=scsi%d  Control Node=/dev/%s  irq=%d\n",
567 			pHba->host->host_no, pHba->name, host->irq);
568 	seq_printf(m, "\tpost fifo size  = %d\n\treply fifo size = %d\n\tsg table size   = %d\n\n",
569 			host->can_queue, (int) pHba->reply_fifo_size , host->sg_tablesize);
570 
571 	seq_puts(m, "Devices:\n");
572 	for(chan = 0; chan < MAX_CHANNEL; chan++) {
573 		for(id = 0; id < MAX_ID; id++) {
574 			d = pHba->channel[chan].device[id];
575 			while(d) {
576 				seq_printf(m,"\t%-24.24s", d->pScsi_dev->vendor);
577 				seq_printf(m," Rev: %-8.8s\n", d->pScsi_dev->rev);
578 
579 				unit = d->pI2o_dev->lct_data.tid;
580 				seq_printf(m, "\tTID=%d, (Channel=%d, Target=%d, Lun=%llu)  (%s)\n\n",
581 					       unit, (int)d->scsi_channel, (int)d->scsi_id, d->scsi_lun,
582 					       scsi_device_online(d->pScsi_dev)? "online":"offline");
583 				d = d->next_lun;
584 			}
585 		}
586 	}
587 	return 0;
588 }
589 
590 /*
591  *	Turn a struct scsi_cmnd * into a unique 32 bit 'context'.
592  */
adpt_cmd_to_context(struct scsi_cmnd * cmd)593 static u32 adpt_cmd_to_context(struct scsi_cmnd *cmd)
594 {
595 	return (u32)cmd->serial_number;
596 }
597 
598 /*
599  *	Go from a u32 'context' to a struct scsi_cmnd * .
600  *	This could probably be made more efficient.
601  */
602 static struct scsi_cmnd *
adpt_cmd_from_context(adpt_hba * pHba,u32 context)603 	adpt_cmd_from_context(adpt_hba * pHba, u32 context)
604 {
605 	struct scsi_cmnd * cmd;
606 	struct scsi_device * d;
607 
608 	if (context == 0)
609 		return NULL;
610 
611 	spin_unlock(pHba->host->host_lock);
612 	shost_for_each_device(d, pHba->host) {
613 		unsigned long flags;
614 		spin_lock_irqsave(&d->list_lock, flags);
615 		list_for_each_entry(cmd, &d->cmd_list, list) {
616 			if (((u32)cmd->serial_number == context)) {
617 				spin_unlock_irqrestore(&d->list_lock, flags);
618 				scsi_device_put(d);
619 				spin_lock(pHba->host->host_lock);
620 				return cmd;
621 			}
622 		}
623 		spin_unlock_irqrestore(&d->list_lock, flags);
624 	}
625 	spin_lock(pHba->host->host_lock);
626 
627 	return NULL;
628 }
629 
630 /*
631  *	Turn a pointer to ioctl reply data into an u32 'context'
632  */
adpt_ioctl_to_context(adpt_hba * pHba,void * reply)633 static u32 adpt_ioctl_to_context(adpt_hba * pHba, void *reply)
634 {
635 #if BITS_PER_LONG == 32
636 	return (u32)(unsigned long)reply;
637 #else
638 	ulong flags = 0;
639 	u32 nr, i;
640 
641 	spin_lock_irqsave(pHba->host->host_lock, flags);
642 	nr = ARRAY_SIZE(pHba->ioctl_reply_context);
643 	for (i = 0; i < nr; i++) {
644 		if (pHba->ioctl_reply_context[i] == NULL) {
645 			pHba->ioctl_reply_context[i] = reply;
646 			break;
647 		}
648 	}
649 	spin_unlock_irqrestore(pHba->host->host_lock, flags);
650 	if (i >= nr) {
651 		kfree (reply);
652 		printk(KERN_WARNING"%s: Too many outstanding "
653 				"ioctl commands\n", pHba->name);
654 		return (u32)-1;
655 	}
656 
657 	return i;
658 #endif
659 }
660 
661 /*
662  *	Go from an u32 'context' to a pointer to ioctl reply data.
663  */
adpt_ioctl_from_context(adpt_hba * pHba,u32 context)664 static void *adpt_ioctl_from_context(adpt_hba *pHba, u32 context)
665 {
666 #if BITS_PER_LONG == 32
667 	return (void *)(unsigned long)context;
668 #else
669 	void *p = pHba->ioctl_reply_context[context];
670 	pHba->ioctl_reply_context[context] = NULL;
671 
672 	return p;
673 #endif
674 }
675 
676 /*===========================================================================
677  * Error Handling routines
678  *===========================================================================
679  */
680 
adpt_abort(struct scsi_cmnd * cmd)681 static int adpt_abort(struct scsi_cmnd * cmd)
682 {
683 	adpt_hba* pHba = NULL;	/* host bus adapter structure */
684 	struct adpt_device* dptdevice;	/* dpt per device information */
685 	u32 msg[5];
686 	int rcode;
687 
688 	if(cmd->serial_number == 0){
689 		return FAILED;
690 	}
691 	pHba = (adpt_hba*) cmd->device->host->hostdata[0];
692 	printk(KERN_INFO"%s: Trying to Abort\n",pHba->name);
693 	if ((dptdevice = (void*) (cmd->device->hostdata)) == NULL) {
694 		printk(KERN_ERR "%s: Unable to abort: No device in cmnd\n",pHba->name);
695 		return FAILED;
696 	}
697 
698 	memset(msg, 0, sizeof(msg));
699 	msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
700 	msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|dptdevice->tid;
701 	msg[2] = 0;
702 	msg[3]= 0;
703 	msg[4] = adpt_cmd_to_context(cmd);
704 	if (pHba->host)
705 		spin_lock_irq(pHba->host->host_lock);
706 	rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
707 	if (pHba->host)
708 		spin_unlock_irq(pHba->host->host_lock);
709 	if (rcode != 0) {
710 		if(rcode == -EOPNOTSUPP ){
711 			printk(KERN_INFO"%s: Abort cmd not supported\n",pHba->name);
712 			return FAILED;
713 		}
714 		printk(KERN_INFO"%s: Abort failed.\n",pHba->name);
715 		return FAILED;
716 	}
717 	printk(KERN_INFO"%s: Abort complete.\n",pHba->name);
718 	return SUCCESS;
719 }
720 
721 
722 #define I2O_DEVICE_RESET 0x27
723 // This is the same for BLK and SCSI devices
724 // NOTE this is wrong in the i2o.h definitions
725 // This is not currently supported by our adapter but we issue it anyway
adpt_device_reset(struct scsi_cmnd * cmd)726 static int adpt_device_reset(struct scsi_cmnd* cmd)
727 {
728 	adpt_hba* pHba;
729 	u32 msg[4];
730 	u32 rcode;
731 	int old_state;
732 	struct adpt_device* d = cmd->device->hostdata;
733 
734 	pHba = (void*) cmd->device->host->hostdata[0];
735 	printk(KERN_INFO"%s: Trying to reset device\n",pHba->name);
736 	if (!d) {
737 		printk(KERN_INFO"%s: Reset Device: Device Not found\n",pHba->name);
738 		return FAILED;
739 	}
740 	memset(msg, 0, sizeof(msg));
741 	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
742 	msg[1] = (I2O_DEVICE_RESET<<24|HOST_TID<<12|d->tid);
743 	msg[2] = 0;
744 	msg[3] = 0;
745 
746 	if (pHba->host)
747 		spin_lock_irq(pHba->host->host_lock);
748 	old_state = d->state;
749 	d->state |= DPTI_DEV_RESET;
750 	rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
751 	d->state = old_state;
752 	if (pHba->host)
753 		spin_unlock_irq(pHba->host->host_lock);
754 	if (rcode != 0) {
755 		if(rcode == -EOPNOTSUPP ){
756 			printk(KERN_INFO"%s: Device reset not supported\n",pHba->name);
757 			return FAILED;
758 		}
759 		printk(KERN_INFO"%s: Device reset failed\n",pHba->name);
760 		return FAILED;
761 	} else {
762 		printk(KERN_INFO"%s: Device reset successful\n",pHba->name);
763 		return SUCCESS;
764 	}
765 }
766 
767 
768 #define I2O_HBA_BUS_RESET 0x87
769 // This version of bus reset is called by the eh_error handler
adpt_bus_reset(struct scsi_cmnd * cmd)770 static int adpt_bus_reset(struct scsi_cmnd* cmd)
771 {
772 	adpt_hba* pHba;
773 	u32 msg[4];
774 	u32 rcode;
775 
776 	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
777 	memset(msg, 0, sizeof(msg));
778 	printk(KERN_WARNING"%s: Bus reset: SCSI Bus %d: tid: %d\n",pHba->name, cmd->device->channel,pHba->channel[cmd->device->channel].tid );
779 	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
780 	msg[1] = (I2O_HBA_BUS_RESET<<24|HOST_TID<<12|pHba->channel[cmd->device->channel].tid);
781 	msg[2] = 0;
782 	msg[3] = 0;
783 	if (pHba->host)
784 		spin_lock_irq(pHba->host->host_lock);
785 	rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
786 	if (pHba->host)
787 		spin_unlock_irq(pHba->host->host_lock);
788 	if (rcode != 0) {
789 		printk(KERN_WARNING"%s: Bus reset failed.\n",pHba->name);
790 		return FAILED;
791 	} else {
792 		printk(KERN_WARNING"%s: Bus reset success.\n",pHba->name);
793 		return SUCCESS;
794 	}
795 }
796 
797 // This version of reset is called by the eh_error_handler
__adpt_reset(struct scsi_cmnd * cmd)798 static int __adpt_reset(struct scsi_cmnd* cmd)
799 {
800 	adpt_hba* pHba;
801 	int rcode;
802 	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
803 	printk(KERN_WARNING"%s: Hba Reset: scsi id %d: tid: %d\n",pHba->name,cmd->device->channel,pHba->channel[cmd->device->channel].tid );
804 	rcode =  adpt_hba_reset(pHba);
805 	if(rcode == 0){
806 		printk(KERN_WARNING"%s: HBA reset complete\n",pHba->name);
807 		return SUCCESS;
808 	} else {
809 		printk(KERN_WARNING"%s: HBA reset failed (%x)\n",pHba->name, rcode);
810 		return FAILED;
811 	}
812 }
813 
adpt_reset(struct scsi_cmnd * cmd)814 static int adpt_reset(struct scsi_cmnd* cmd)
815 {
816 	int rc;
817 
818 	spin_lock_irq(cmd->device->host->host_lock);
819 	rc = __adpt_reset(cmd);
820 	spin_unlock_irq(cmd->device->host->host_lock);
821 
822 	return rc;
823 }
824 
825 // This version of reset is called by the ioctls and indirectly from eh_error_handler via adpt_reset
adpt_hba_reset(adpt_hba * pHba)826 static int adpt_hba_reset(adpt_hba* pHba)
827 {
828 	int rcode;
829 
830 	pHba->state |= DPTI_STATE_RESET;
831 
832 	// Activate does get status , init outbound, and get hrt
833 	if ((rcode=adpt_i2o_activate_hba(pHba)) < 0) {
834 		printk(KERN_ERR "%s: Could not activate\n", pHba->name);
835 		adpt_i2o_delete_hba(pHba);
836 		return rcode;
837 	}
838 
839 	if ((rcode=adpt_i2o_build_sys_table()) < 0) {
840 		adpt_i2o_delete_hba(pHba);
841 		return rcode;
842 	}
843 	PDEBUG("%s: in HOLD state\n",pHba->name);
844 
845 	if ((rcode=adpt_i2o_online_hba(pHba)) < 0) {
846 		adpt_i2o_delete_hba(pHba);
847 		return rcode;
848 	}
849 	PDEBUG("%s: in OPERATIONAL state\n",pHba->name);
850 
851 	if ((rcode=adpt_i2o_lct_get(pHba)) < 0){
852 		adpt_i2o_delete_hba(pHba);
853 		return rcode;
854 	}
855 
856 	if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0){
857 		adpt_i2o_delete_hba(pHba);
858 		return rcode;
859 	}
860 	pHba->state &= ~DPTI_STATE_RESET;
861 
862 	adpt_fail_posted_scbs(pHba);
863 	return 0;	/* return success */
864 }
865 
866 /*===========================================================================
867  *
868  *===========================================================================
869  */
870 
871 
adpt_i2o_sys_shutdown(void)872 static void adpt_i2o_sys_shutdown(void)
873 {
874 	adpt_hba *pHba, *pNext;
875 	struct adpt_i2o_post_wait_data *p1, *old;
876 
877 	 printk(KERN_INFO"Shutting down Adaptec I2O controllers.\n");
878 	 printk(KERN_INFO"   This could take a few minutes if there are many devices attached\n");
879 	/* Delete all IOPs from the controller chain */
880 	/* They should have already been released by the
881 	 * scsi-core
882 	 */
883 	for (pHba = hba_chain; pHba; pHba = pNext) {
884 		pNext = pHba->next;
885 		adpt_i2o_delete_hba(pHba);
886 	}
887 
888 	/* Remove any timedout entries from the wait queue.  */
889 //	spin_lock_irqsave(&adpt_post_wait_lock, flags);
890 	/* Nothing should be outstanding at this point so just
891 	 * free them
892 	 */
893 	for(p1 = adpt_post_wait_queue; p1;) {
894 		old = p1;
895 		p1 = p1->next;
896 		kfree(old);
897 	}
898 //	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
899 	adpt_post_wait_queue = NULL;
900 
901 	 printk(KERN_INFO "Adaptec I2O controllers down.\n");
902 }
903 
adpt_install_hba(struct scsi_host_template * sht,struct pci_dev * pDev)904 static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev)
905 {
906 
907 	adpt_hba* pHba = NULL;
908 	adpt_hba* p = NULL;
909 	ulong base_addr0_phys = 0;
910 	ulong base_addr1_phys = 0;
911 	u32 hba_map0_area_size = 0;
912 	u32 hba_map1_area_size = 0;
913 	void __iomem *base_addr_virt = NULL;
914 	void __iomem *msg_addr_virt = NULL;
915 	int dma64 = 0;
916 
917 	int raptorFlag = FALSE;
918 
919 	if(pci_enable_device(pDev)) {
920 		return -EINVAL;
921 	}
922 
923 	if (pci_request_regions(pDev, "dpt_i2o")) {
924 		PERROR("dpti: adpt_config_hba: pci request region failed\n");
925 		return -EINVAL;
926 	}
927 
928 	pci_set_master(pDev);
929 
930 	/*
931 	 *	See if we should enable dma64 mode.
932 	 */
933 	if (sizeof(dma_addr_t) > 4 &&
934 	    pci_set_dma_mask(pDev, DMA_BIT_MASK(64)) == 0) {
935 		if (dma_get_required_mask(&pDev->dev) > DMA_BIT_MASK(32))
936 			dma64 = 1;
937 	}
938 	if (!dma64 && pci_set_dma_mask(pDev, DMA_BIT_MASK(32)) != 0)
939 		return -EINVAL;
940 
941 	/* adapter only supports message blocks below 4GB */
942 	pci_set_consistent_dma_mask(pDev, DMA_BIT_MASK(32));
943 
944 	base_addr0_phys = pci_resource_start(pDev,0);
945 	hba_map0_area_size = pci_resource_len(pDev,0);
946 
947 	// Check if standard PCI card or single BAR Raptor
948 	if(pDev->device == PCI_DPT_DEVICE_ID){
949 		if(pDev->subsystem_device >=0xc032 && pDev->subsystem_device <= 0xc03b){
950 			// Raptor card with this device id needs 4M
951 			hba_map0_area_size = 0x400000;
952 		} else { // Not Raptor - it is a PCI card
953 			if(hba_map0_area_size > 0x100000 ){
954 				hba_map0_area_size = 0x100000;
955 			}
956 		}
957 	} else {// Raptor split BAR config
958 		// Use BAR1 in this configuration
959 		base_addr1_phys = pci_resource_start(pDev,1);
960 		hba_map1_area_size = pci_resource_len(pDev,1);
961 		raptorFlag = TRUE;
962 	}
963 
964 #if BITS_PER_LONG == 64
965 	/*
966 	 *	The original Adaptec 64 bit driver has this comment here:
967 	 *	"x86_64 machines need more optimal mappings"
968 	 *
969 	 *	I assume some HBAs report ridiculously large mappings
970 	 *	and we need to limit them on platforms with IOMMUs.
971 	 */
972 	if (raptorFlag == TRUE) {
973 		if (hba_map0_area_size > 128)
974 			hba_map0_area_size = 128;
975 		if (hba_map1_area_size > 524288)
976 			hba_map1_area_size = 524288;
977 	} else {
978 		if (hba_map0_area_size > 524288)
979 			hba_map0_area_size = 524288;
980 	}
981 #endif
982 
983 	base_addr_virt = ioremap(base_addr0_phys,hba_map0_area_size);
984 	if (!base_addr_virt) {
985 		pci_release_regions(pDev);
986 		PERROR("dpti: adpt_config_hba: io remap failed\n");
987 		return -EINVAL;
988 	}
989 
990         if(raptorFlag == TRUE) {
991 		msg_addr_virt = ioremap(base_addr1_phys, hba_map1_area_size );
992 		if (!msg_addr_virt) {
993 			PERROR("dpti: adpt_config_hba: io remap failed on BAR1\n");
994 			iounmap(base_addr_virt);
995 			pci_release_regions(pDev);
996 			return -EINVAL;
997 		}
998 	} else {
999 		msg_addr_virt = base_addr_virt;
1000 	}
1001 
1002 	// Allocate and zero the data structure
1003 	pHba = kzalloc(sizeof(adpt_hba), GFP_KERNEL);
1004 	if (!pHba) {
1005 		if (msg_addr_virt != base_addr_virt)
1006 			iounmap(msg_addr_virt);
1007 		iounmap(base_addr_virt);
1008 		pci_release_regions(pDev);
1009 		return -ENOMEM;
1010 	}
1011 
1012 	mutex_lock(&adpt_configuration_lock);
1013 
1014 	if(hba_chain != NULL){
1015 		for(p = hba_chain; p->next; p = p->next);
1016 		p->next = pHba;
1017 	} else {
1018 		hba_chain = pHba;
1019 	}
1020 	pHba->next = NULL;
1021 	pHba->unit = hba_count;
1022 	sprintf(pHba->name, "dpti%d", hba_count);
1023 	hba_count++;
1024 
1025 	mutex_unlock(&adpt_configuration_lock);
1026 
1027 	pHba->pDev = pDev;
1028 	pHba->base_addr_phys = base_addr0_phys;
1029 
1030 	// Set up the Virtual Base Address of the I2O Device
1031 	pHba->base_addr_virt = base_addr_virt;
1032 	pHba->msg_addr_virt = msg_addr_virt;
1033 	pHba->irq_mask = base_addr_virt+0x30;
1034 	pHba->post_port = base_addr_virt+0x40;
1035 	pHba->reply_port = base_addr_virt+0x44;
1036 
1037 	pHba->hrt = NULL;
1038 	pHba->lct = NULL;
1039 	pHba->lct_size = 0;
1040 	pHba->status_block = NULL;
1041 	pHba->post_count = 0;
1042 	pHba->state = DPTI_STATE_RESET;
1043 	pHba->pDev = pDev;
1044 	pHba->devices = NULL;
1045 	pHba->dma64 = dma64;
1046 
1047 	// Initializing the spinlocks
1048 	spin_lock_init(&pHba->state_lock);
1049 	spin_lock_init(&adpt_post_wait_lock);
1050 
1051 	if(raptorFlag == 0){
1052 		printk(KERN_INFO "Adaptec I2O RAID controller"
1053 				 " %d at %p size=%x irq=%d%s\n",
1054 			hba_count-1, base_addr_virt,
1055 			hba_map0_area_size, pDev->irq,
1056 			dma64 ? " (64-bit DMA)" : "");
1057 	} else {
1058 		printk(KERN_INFO"Adaptec I2O RAID controller %d irq=%d%s\n",
1059 			hba_count-1, pDev->irq,
1060 			dma64 ? " (64-bit DMA)" : "");
1061 		printk(KERN_INFO"     BAR0 %p - size= %x\n",base_addr_virt,hba_map0_area_size);
1062 		printk(KERN_INFO"     BAR1 %p - size= %x\n",msg_addr_virt,hba_map1_area_size);
1063 	}
1064 
1065 	if (request_irq (pDev->irq, adpt_isr, IRQF_SHARED, pHba->name, pHba)) {
1066 		printk(KERN_ERR"%s: Couldn't register IRQ %d\n", pHba->name, pDev->irq);
1067 		adpt_i2o_delete_hba(pHba);
1068 		return -EINVAL;
1069 	}
1070 
1071 	return 0;
1072 }
1073 
1074 
adpt_i2o_delete_hba(adpt_hba * pHba)1075 static void adpt_i2o_delete_hba(adpt_hba* pHba)
1076 {
1077 	adpt_hba* p1;
1078 	adpt_hba* p2;
1079 	struct i2o_device* d;
1080 	struct i2o_device* next;
1081 	int i;
1082 	int j;
1083 	struct adpt_device* pDev;
1084 	struct adpt_device* pNext;
1085 
1086 
1087 	mutex_lock(&adpt_configuration_lock);
1088 	// scsi_unregister calls our adpt_release which
1089 	// does a quiese
1090 	if(pHba->host){
1091 		free_irq(pHba->host->irq, pHba);
1092 	}
1093 	p2 = NULL;
1094 	for( p1 = hba_chain; p1; p2 = p1,p1=p1->next){
1095 		if(p1 == pHba) {
1096 			if(p2) {
1097 				p2->next = p1->next;
1098 			} else {
1099 				hba_chain = p1->next;
1100 			}
1101 			break;
1102 		}
1103 	}
1104 
1105 	hba_count--;
1106 	mutex_unlock(&adpt_configuration_lock);
1107 
1108 	iounmap(pHba->base_addr_virt);
1109 	pci_release_regions(pHba->pDev);
1110 	if(pHba->msg_addr_virt != pHba->base_addr_virt){
1111 		iounmap(pHba->msg_addr_virt);
1112 	}
1113 	if(pHba->FwDebugBuffer_P)
1114 	   	iounmap(pHba->FwDebugBuffer_P);
1115 	if(pHba->hrt) {
1116 		dma_free_coherent(&pHba->pDev->dev,
1117 			pHba->hrt->num_entries * pHba->hrt->entry_len << 2,
1118 			pHba->hrt, pHba->hrt_pa);
1119 	}
1120 	if(pHba->lct) {
1121 		dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
1122 			pHba->lct, pHba->lct_pa);
1123 	}
1124 	if(pHba->status_block) {
1125 		dma_free_coherent(&pHba->pDev->dev, sizeof(i2o_status_block),
1126 			pHba->status_block, pHba->status_block_pa);
1127 	}
1128 	if(pHba->reply_pool) {
1129 		dma_free_coherent(&pHba->pDev->dev,
1130 			pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
1131 			pHba->reply_pool, pHba->reply_pool_pa);
1132 	}
1133 
1134 	for(d = pHba->devices; d ; d = next){
1135 		next = d->next;
1136 		kfree(d);
1137 	}
1138 	for(i = 0 ; i < pHba->top_scsi_channel ; i++){
1139 		for(j = 0; j < MAX_ID; j++){
1140 			if(pHba->channel[i].device[j] != NULL){
1141 				for(pDev = pHba->channel[i].device[j]; pDev; pDev = pNext){
1142 					pNext = pDev->next_lun;
1143 					kfree(pDev);
1144 				}
1145 			}
1146 		}
1147 	}
1148 	pci_dev_put(pHba->pDev);
1149 	if (adpt_sysfs_class)
1150 		device_destroy(adpt_sysfs_class,
1151 				MKDEV(DPTI_I2O_MAJOR, pHba->unit));
1152 	kfree(pHba);
1153 
1154 	if(hba_count <= 0){
1155 		unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER);
1156 		if (adpt_sysfs_class) {
1157 			class_destroy(adpt_sysfs_class);
1158 			adpt_sysfs_class = NULL;
1159 		}
1160 	}
1161 }
1162 
adpt_find_device(adpt_hba * pHba,u32 chan,u32 id,u64 lun)1163 static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u64 lun)
1164 {
1165 	struct adpt_device* d;
1166 
1167 	if(chan < 0 || chan >= MAX_CHANNEL)
1168 		return NULL;
1169 
1170 	if( pHba->channel[chan].device == NULL){
1171 		printk(KERN_DEBUG"Adaptec I2O RAID: Trying to find device before they are allocated\n");
1172 		return NULL;
1173 	}
1174 
1175 	d = pHba->channel[chan].device[id];
1176 	if(!d || d->tid == 0) {
1177 		return NULL;
1178 	}
1179 
1180 	/* If it is the only lun at that address then this should match*/
1181 	if(d->scsi_lun == lun){
1182 		return d;
1183 	}
1184 
1185 	/* else we need to look through all the luns */
1186 	for(d=d->next_lun ; d ; d = d->next_lun){
1187 		if(d->scsi_lun == lun){
1188 			return d;
1189 		}
1190 	}
1191 	return NULL;
1192 }
1193 
1194 
adpt_i2o_post_wait(adpt_hba * pHba,u32 * msg,int len,int timeout)1195 static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout)
1196 {
1197 	// I used my own version of the WAIT_QUEUE_HEAD
1198 	// to handle some version differences
1199 	// When embedded in the kernel this could go back to the vanilla one
1200 	ADPT_DECLARE_WAIT_QUEUE_HEAD(adpt_wq_i2o_post);
1201 	int status = 0;
1202 	ulong flags = 0;
1203 	struct adpt_i2o_post_wait_data *p1, *p2;
1204 	struct adpt_i2o_post_wait_data *wait_data =
1205 		kmalloc(sizeof(struct adpt_i2o_post_wait_data), GFP_ATOMIC);
1206 	DECLARE_WAITQUEUE(wait, current);
1207 
1208 	if (!wait_data)
1209 		return -ENOMEM;
1210 
1211 	/*
1212 	 * The spin locking is needed to keep anyone from playing
1213 	 * with the queue pointers and id while we do the same
1214 	 */
1215 	spin_lock_irqsave(&adpt_post_wait_lock, flags);
1216        // TODO we need a MORE unique way of getting ids
1217        // to support async LCT get
1218 	wait_data->next = adpt_post_wait_queue;
1219 	adpt_post_wait_queue = wait_data;
1220 	adpt_post_wait_id++;
1221 	adpt_post_wait_id &= 0x7fff;
1222 	wait_data->id =  adpt_post_wait_id;
1223 	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
1224 
1225 	wait_data->wq = &adpt_wq_i2o_post;
1226 	wait_data->status = -ETIMEDOUT;
1227 
1228 	add_wait_queue(&adpt_wq_i2o_post, &wait);
1229 
1230 	msg[2] |= 0x80000000 | ((u32)wait_data->id);
1231 	timeout *= HZ;
1232 	if((status = adpt_i2o_post_this(pHba, msg, len)) == 0){
1233 		set_current_state(TASK_INTERRUPTIBLE);
1234 		if(pHba->host)
1235 			spin_unlock_irq(pHba->host->host_lock);
1236 		if (!timeout)
1237 			schedule();
1238 		else{
1239 			timeout = schedule_timeout(timeout);
1240 			if (timeout == 0) {
1241 				// I/O issued, but cannot get result in
1242 				// specified time. Freeing resorces is
1243 				// dangerous.
1244 				status = -ETIME;
1245 			}
1246 		}
1247 		if(pHba->host)
1248 			spin_lock_irq(pHba->host->host_lock);
1249 	}
1250 	remove_wait_queue(&adpt_wq_i2o_post, &wait);
1251 
1252 	if(status == -ETIMEDOUT){
1253 		printk(KERN_INFO"dpti%d: POST WAIT TIMEOUT\n",pHba->unit);
1254 		// We will have to free the wait_data memory during shutdown
1255 		return status;
1256 	}
1257 
1258 	/* Remove the entry from the queue.  */
1259 	p2 = NULL;
1260 	spin_lock_irqsave(&adpt_post_wait_lock, flags);
1261 	for(p1 = adpt_post_wait_queue; p1; p2 = p1, p1 = p1->next) {
1262 		if(p1 == wait_data) {
1263 			if(p1->status == I2O_DETAIL_STATUS_UNSUPPORTED_FUNCTION ) {
1264 				status = -EOPNOTSUPP;
1265 			}
1266 			if(p2) {
1267 				p2->next = p1->next;
1268 			} else {
1269 				adpt_post_wait_queue = p1->next;
1270 			}
1271 			break;
1272 		}
1273 	}
1274 	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
1275 
1276 	kfree(wait_data);
1277 
1278 	return status;
1279 }
1280 
1281 
adpt_i2o_post_this(adpt_hba * pHba,u32 * data,int len)1282 static s32 adpt_i2o_post_this(adpt_hba* pHba, u32* data, int len)
1283 {
1284 
1285 	u32 m = EMPTY_QUEUE;
1286 	u32 __iomem *msg;
1287 	ulong timeout = jiffies + 30*HZ;
1288 	do {
1289 		rmb();
1290 		m = readl(pHba->post_port);
1291 		if (m != EMPTY_QUEUE) {
1292 			break;
1293 		}
1294 		if(time_after(jiffies,timeout)){
1295 			printk(KERN_WARNING"dpti%d: Timeout waiting for message frame!\n", pHba->unit);
1296 			return -ETIMEDOUT;
1297 		}
1298 		schedule_timeout_uninterruptible(1);
1299 	} while(m == EMPTY_QUEUE);
1300 
1301 	msg = pHba->msg_addr_virt + m;
1302 	memcpy_toio(msg, data, len);
1303 	wmb();
1304 
1305 	//post message
1306 	writel(m, pHba->post_port);
1307 	wmb();
1308 
1309 	return 0;
1310 }
1311 
1312 
adpt_i2o_post_wait_complete(u32 context,int status)1313 static void adpt_i2o_post_wait_complete(u32 context, int status)
1314 {
1315 	struct adpt_i2o_post_wait_data *p1 = NULL;
1316 	/*
1317 	 * We need to search through the adpt_post_wait
1318 	 * queue to see if the given message is still
1319 	 * outstanding.  If not, it means that the IOP
1320 	 * took longer to respond to the message than we
1321 	 * had allowed and timer has already expired.
1322 	 * Not much we can do about that except log
1323 	 * it for debug purposes, increase timeout, and recompile
1324 	 *
1325 	 * Lock needed to keep anyone from moving queue pointers
1326 	 * around while we're looking through them.
1327 	 */
1328 
1329 	context &= 0x7fff;
1330 
1331 	spin_lock(&adpt_post_wait_lock);
1332 	for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
1333 		if(p1->id == context) {
1334 			p1->status = status;
1335 			spin_unlock(&adpt_post_wait_lock);
1336 			wake_up_interruptible(p1->wq);
1337 			return;
1338 		}
1339 	}
1340 	spin_unlock(&adpt_post_wait_lock);
1341         // If this happens we lose commands that probably really completed
1342 	printk(KERN_DEBUG"dpti: Could Not find task %d in wait queue\n",context);
1343 	printk(KERN_DEBUG"      Tasks in wait queue:\n");
1344 	for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
1345 		printk(KERN_DEBUG"           %d\n",p1->id);
1346 	}
1347 	return;
1348 }
1349 
adpt_i2o_reset_hba(adpt_hba * pHba)1350 static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
1351 {
1352 	u32 msg[8];
1353 	u8* status;
1354 	dma_addr_t addr;
1355 	u32 m = EMPTY_QUEUE ;
1356 	ulong timeout = jiffies + (TMOUT_IOPRESET*HZ);
1357 
1358 	if(pHba->initialized  == FALSE) {	// First time reset should be quick
1359 		timeout = jiffies + (25*HZ);
1360 	} else {
1361 		adpt_i2o_quiesce_hba(pHba);
1362 	}
1363 
1364 	do {
1365 		rmb();
1366 		m = readl(pHba->post_port);
1367 		if (m != EMPTY_QUEUE) {
1368 			break;
1369 		}
1370 		if(time_after(jiffies,timeout)){
1371 			printk(KERN_WARNING"Timeout waiting for message!\n");
1372 			return -ETIMEDOUT;
1373 		}
1374 		schedule_timeout_uninterruptible(1);
1375 	} while (m == EMPTY_QUEUE);
1376 
1377 	status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
1378 	if(status == NULL) {
1379 		adpt_send_nop(pHba, m);
1380 		printk(KERN_ERR"IOP reset failed - no free memory.\n");
1381 		return -ENOMEM;
1382 	}
1383 	memset(status,0,4);
1384 
1385 	msg[0]=EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0;
1386 	msg[1]=I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID;
1387 	msg[2]=0;
1388 	msg[3]=0;
1389 	msg[4]=0;
1390 	msg[5]=0;
1391 	msg[6]=dma_low(addr);
1392 	msg[7]=dma_high(addr);
1393 
1394 	memcpy_toio(pHba->msg_addr_virt+m, msg, sizeof(msg));
1395 	wmb();
1396 	writel(m, pHba->post_port);
1397 	wmb();
1398 
1399 	while(*status == 0){
1400 		if(time_after(jiffies,timeout)){
1401 			printk(KERN_WARNING"%s: IOP Reset Timeout\n",pHba->name);
1402 			/* We lose 4 bytes of "status" here, but we cannot
1403 			   free these because controller may awake and corrupt
1404 			   those bytes at any time */
1405 			/* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
1406 			return -ETIMEDOUT;
1407 		}
1408 		rmb();
1409 		schedule_timeout_uninterruptible(1);
1410 	}
1411 
1412 	if(*status == 0x01 /*I2O_EXEC_IOP_RESET_IN_PROGRESS*/) {
1413 		PDEBUG("%s: Reset in progress...\n", pHba->name);
1414 		// Here we wait for message frame to become available
1415 		// indicated that reset has finished
1416 		do {
1417 			rmb();
1418 			m = readl(pHba->post_port);
1419 			if (m != EMPTY_QUEUE) {
1420 				break;
1421 			}
1422 			if(time_after(jiffies,timeout)){
1423 				printk(KERN_ERR "%s:Timeout waiting for IOP Reset.\n",pHba->name);
1424 				/* We lose 4 bytes of "status" here, but we
1425 				   cannot free these because controller may
1426 				   awake and corrupt those bytes at any time */
1427 				/* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
1428 				return -ETIMEDOUT;
1429 			}
1430 			schedule_timeout_uninterruptible(1);
1431 		} while (m == EMPTY_QUEUE);
1432 		// Flush the offset
1433 		adpt_send_nop(pHba, m);
1434 	}
1435 	adpt_i2o_status_get(pHba);
1436 	if(*status == 0x02 ||
1437 			pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
1438 		printk(KERN_WARNING"%s: Reset reject, trying to clear\n",
1439 				pHba->name);
1440 	} else {
1441 		PDEBUG("%s: Reset completed.\n", pHba->name);
1442 	}
1443 
1444 	dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
1445 #ifdef UARTDELAY
1446 	// This delay is to allow someone attached to the card through the debug UART to
1447 	// set up the dump levels that they want before the rest of the initialization sequence
1448 	adpt_delay(20000);
1449 #endif
1450 	return 0;
1451 }
1452 
1453 
adpt_i2o_parse_lct(adpt_hba * pHba)1454 static int adpt_i2o_parse_lct(adpt_hba* pHba)
1455 {
1456 	int i;
1457 	int max;
1458 	int tid;
1459 	struct i2o_device *d;
1460 	i2o_lct *lct = pHba->lct;
1461 	u8 bus_no = 0;
1462 	s16 scsi_id;
1463 	u64 scsi_lun;
1464 	u32 buf[10]; // larger than 7, or 8 ...
1465 	struct adpt_device* pDev;
1466 
1467 	if (lct == NULL) {
1468 		printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
1469 		return -1;
1470 	}
1471 
1472 	max = lct->table_size;
1473 	max -= 3;
1474 	max /= 9;
1475 
1476 	for(i=0;i<max;i++) {
1477 		if( lct->lct_entry[i].user_tid != 0xfff){
1478 			/*
1479 			 * If we have hidden devices, we need to inform the upper layers about
1480 			 * the possible maximum id reference to handle device access when
1481 			 * an array is disassembled. This code has no other purpose but to
1482 			 * allow us future access to devices that are currently hidden
1483 			 * behind arrays, hotspares or have not been configured (JBOD mode).
1484 			 */
1485 			if( lct->lct_entry[i].class_id != I2O_CLASS_RANDOM_BLOCK_STORAGE &&
1486 			    lct->lct_entry[i].class_id != I2O_CLASS_SCSI_PERIPHERAL &&
1487 			    lct->lct_entry[i].class_id != I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
1488 			    	continue;
1489 			}
1490 			tid = lct->lct_entry[i].tid;
1491 			// I2O_DPT_DEVICE_INFO_GROUP_NO;
1492 			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
1493 				continue;
1494 			}
1495 			bus_no = buf[0]>>16;
1496 			scsi_id = buf[1];
1497 			scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
1498 			if(bus_no >= MAX_CHANNEL) {	// Something wrong skip it
1499 				printk(KERN_WARNING"%s: Channel number %d out of range \n", pHba->name, bus_no);
1500 				continue;
1501 			}
1502 			if (scsi_id >= MAX_ID){
1503 				printk(KERN_WARNING"%s: SCSI ID %d out of range \n", pHba->name, bus_no);
1504 				continue;
1505 			}
1506 			if(bus_no > pHba->top_scsi_channel){
1507 				pHba->top_scsi_channel = bus_no;
1508 			}
1509 			if(scsi_id > pHba->top_scsi_id){
1510 				pHba->top_scsi_id = scsi_id;
1511 			}
1512 			if(scsi_lun > pHba->top_scsi_lun){
1513 				pHba->top_scsi_lun = scsi_lun;
1514 			}
1515 			continue;
1516 		}
1517 		d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
1518 		if(d==NULL)
1519 		{
1520 			printk(KERN_CRIT"%s: Out of memory for I2O device data.\n",pHba->name);
1521 			return -ENOMEM;
1522 		}
1523 
1524 		d->controller = pHba;
1525 		d->next = NULL;
1526 
1527 		memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
1528 
1529 		d->flags = 0;
1530 		tid = d->lct_data.tid;
1531 		adpt_i2o_report_hba_unit(pHba, d);
1532 		adpt_i2o_install_device(pHba, d);
1533 	}
1534 	bus_no = 0;
1535 	for(d = pHba->devices; d ; d = d->next) {
1536 		if(d->lct_data.class_id  == I2O_CLASS_BUS_ADAPTER_PORT ||
1537 		   d->lct_data.class_id  == I2O_CLASS_FIBRE_CHANNEL_PORT){
1538 			tid = d->lct_data.tid;
1539 			// TODO get the bus_no from hrt-but for now they are in order
1540 			//bus_no =
1541 			if(bus_no > pHba->top_scsi_channel){
1542 				pHba->top_scsi_channel = bus_no;
1543 			}
1544 			pHba->channel[bus_no].type = d->lct_data.class_id;
1545 			pHba->channel[bus_no].tid = tid;
1546 			if(adpt_i2o_query_scalar(pHba, tid, 0x0200, -1, buf, 28)>=0)
1547 			{
1548 				pHba->channel[bus_no].scsi_id = buf[1];
1549 				PDEBUG("Bus %d - SCSI ID %d.\n", bus_no, buf[1]);
1550 			}
1551 			// TODO remove - this is just until we get from hrt
1552 			bus_no++;
1553 			if(bus_no >= MAX_CHANNEL) {	// Something wrong skip it
1554 				printk(KERN_WARNING"%s: Channel number %d out of range - LCT\n", pHba->name, bus_no);
1555 				break;
1556 			}
1557 		}
1558 	}
1559 
1560 	// Setup adpt_device table
1561 	for(d = pHba->devices; d ; d = d->next) {
1562 		if(d->lct_data.class_id  == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
1563 		   d->lct_data.class_id  == I2O_CLASS_SCSI_PERIPHERAL ||
1564 		   d->lct_data.class_id  == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
1565 
1566 			tid = d->lct_data.tid;
1567 			scsi_id = -1;
1568 			// I2O_DPT_DEVICE_INFO_GROUP_NO;
1569 			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)>=0) {
1570 				bus_no = buf[0]>>16;
1571 				scsi_id = buf[1];
1572 				scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
1573 				if(bus_no >= MAX_CHANNEL) {	// Something wrong skip it
1574 					continue;
1575 				}
1576 				if (scsi_id >= MAX_ID) {
1577 					continue;
1578 				}
1579 				if( pHba->channel[bus_no].device[scsi_id] == NULL){
1580 					pDev =  kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
1581 					if(pDev == NULL) {
1582 						return -ENOMEM;
1583 					}
1584 					pHba->channel[bus_no].device[scsi_id] = pDev;
1585 				} else {
1586 					for( pDev = pHba->channel[bus_no].device[scsi_id];
1587 							pDev->next_lun; pDev = pDev->next_lun){
1588 					}
1589 					pDev->next_lun = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
1590 					if(pDev->next_lun == NULL) {
1591 						return -ENOMEM;
1592 					}
1593 					pDev = pDev->next_lun;
1594 				}
1595 				pDev->tid = tid;
1596 				pDev->scsi_channel = bus_no;
1597 				pDev->scsi_id = scsi_id;
1598 				pDev->scsi_lun = scsi_lun;
1599 				pDev->pI2o_dev = d;
1600 				d->owner = pDev;
1601 				pDev->type = (buf[0])&0xff;
1602 				pDev->flags = (buf[0]>>8)&0xff;
1603 				if(scsi_id > pHba->top_scsi_id){
1604 					pHba->top_scsi_id = scsi_id;
1605 				}
1606 				if(scsi_lun > pHba->top_scsi_lun){
1607 					pHba->top_scsi_lun = scsi_lun;
1608 				}
1609 			}
1610 			if(scsi_id == -1){
1611 				printk(KERN_WARNING"Could not find SCSI ID for %s\n",
1612 						d->lct_data.identity_tag);
1613 			}
1614 		}
1615 	}
1616 	return 0;
1617 }
1618 
1619 
1620 /*
1621  *	Each I2O controller has a chain of devices on it - these match
1622  *	the useful parts of the LCT of the board.
1623  */
1624 
adpt_i2o_install_device(adpt_hba * pHba,struct i2o_device * d)1625 static int adpt_i2o_install_device(adpt_hba* pHba, struct i2o_device *d)
1626 {
1627 	mutex_lock(&adpt_configuration_lock);
1628 	d->controller=pHba;
1629 	d->owner=NULL;
1630 	d->next=pHba->devices;
1631 	d->prev=NULL;
1632 	if (pHba->devices != NULL){
1633 		pHba->devices->prev=d;
1634 	}
1635 	pHba->devices=d;
1636 	*d->dev_name = 0;
1637 
1638 	mutex_unlock(&adpt_configuration_lock);
1639 	return 0;
1640 }
1641 
adpt_open(struct inode * inode,struct file * file)1642 static int adpt_open(struct inode *inode, struct file *file)
1643 {
1644 	int minor;
1645 	adpt_hba* pHba;
1646 
1647 	mutex_lock(&adpt_mutex);
1648 	//TODO check for root access
1649 	//
1650 	minor = iminor(inode);
1651 	if (minor >= hba_count) {
1652 		mutex_unlock(&adpt_mutex);
1653 		return -ENXIO;
1654 	}
1655 	mutex_lock(&adpt_configuration_lock);
1656 	for (pHba = hba_chain; pHba; pHba = pHba->next) {
1657 		if (pHba->unit == minor) {
1658 			break;	/* found adapter */
1659 		}
1660 	}
1661 	if (pHba == NULL) {
1662 		mutex_unlock(&adpt_configuration_lock);
1663 		mutex_unlock(&adpt_mutex);
1664 		return -ENXIO;
1665 	}
1666 
1667 //	if(pHba->in_use){
1668 	//	mutex_unlock(&adpt_configuration_lock);
1669 //		return -EBUSY;
1670 //	}
1671 
1672 	pHba->in_use = 1;
1673 	mutex_unlock(&adpt_configuration_lock);
1674 	mutex_unlock(&adpt_mutex);
1675 
1676 	return 0;
1677 }
1678 
adpt_close(struct inode * inode,struct file * file)1679 static int adpt_close(struct inode *inode, struct file *file)
1680 {
1681 	int minor;
1682 	adpt_hba* pHba;
1683 
1684 	minor = iminor(inode);
1685 	if (minor >= hba_count) {
1686 		return -ENXIO;
1687 	}
1688 	mutex_lock(&adpt_configuration_lock);
1689 	for (pHba = hba_chain; pHba; pHba = pHba->next) {
1690 		if (pHba->unit == minor) {
1691 			break;	/* found adapter */
1692 		}
1693 	}
1694 	mutex_unlock(&adpt_configuration_lock);
1695 	if (pHba == NULL) {
1696 		return -ENXIO;
1697 	}
1698 
1699 	pHba->in_use = 0;
1700 
1701 	return 0;
1702 }
1703 
1704 
adpt_i2o_passthru(adpt_hba * pHba,u32 __user * arg)1705 static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
1706 {
1707 	u32 msg[MAX_MESSAGE_SIZE];
1708 	u32* reply = NULL;
1709 	u32 size = 0;
1710 	u32 reply_size = 0;
1711 	u32 __user *user_msg = arg;
1712 	u32 __user * user_reply = NULL;
1713 	void *sg_list[pHba->sg_tablesize];
1714 	u32 sg_offset = 0;
1715 	u32 sg_count = 0;
1716 	int sg_index = 0;
1717 	u32 i = 0;
1718 	u32 rcode = 0;
1719 	void *p = NULL;
1720 	dma_addr_t addr;
1721 	ulong flags = 0;
1722 
1723 	memset(&msg, 0, MAX_MESSAGE_SIZE*4);
1724 	// get user msg size in u32s
1725 	if(get_user(size, &user_msg[0])){
1726 		return -EFAULT;
1727 	}
1728 	size = size>>16;
1729 
1730 	user_reply = &user_msg[size];
1731 	if(size > MAX_MESSAGE_SIZE){
1732 		return -EFAULT;
1733 	}
1734 	size *= 4; // Convert to bytes
1735 
1736 	/* Copy in the user's I2O command */
1737 	if(copy_from_user(msg, user_msg, size)) {
1738 		return -EFAULT;
1739 	}
1740 	get_user(reply_size, &user_reply[0]);
1741 	reply_size = reply_size>>16;
1742 	if(reply_size > REPLY_FRAME_SIZE){
1743 		reply_size = REPLY_FRAME_SIZE;
1744 	}
1745 	reply_size *= 4;
1746 	reply = kzalloc(REPLY_FRAME_SIZE*4, GFP_KERNEL);
1747 	if(reply == NULL) {
1748 		printk(KERN_WARNING"%s: Could not allocate reply buffer\n",pHba->name);
1749 		return -ENOMEM;
1750 	}
1751 	sg_offset = (msg[0]>>4)&0xf;
1752 	msg[2] = 0x40000000; // IOCTL context
1753 	msg[3] = adpt_ioctl_to_context(pHba, reply);
1754 	if (msg[3] == (u32)-1)
1755 		return -EBUSY;
1756 
1757 	memset(sg_list,0, sizeof(sg_list[0])*pHba->sg_tablesize);
1758 	if(sg_offset) {
1759 		// TODO add 64 bit API
1760 		struct sg_simple_element *sg =  (struct sg_simple_element*) (msg+sg_offset);
1761 		sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
1762 		if (sg_count > pHba->sg_tablesize){
1763 			printk(KERN_DEBUG"%s:IOCTL SG List too large (%u)\n", pHba->name,sg_count);
1764 			kfree (reply);
1765 			return -EINVAL;
1766 		}
1767 
1768 		for(i = 0; i < sg_count; i++) {
1769 			int sg_size;
1770 
1771 			if (!(sg[i].flag_count & 0x10000000 /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT*/)) {
1772 				printk(KERN_DEBUG"%s:Bad SG element %d - not simple (%x)\n",pHba->name,i,  sg[i].flag_count);
1773 				rcode = -EINVAL;
1774 				goto cleanup;
1775 			}
1776 			sg_size = sg[i].flag_count & 0xffffff;
1777 			/* Allocate memory for the transfer */
1778 			p = dma_alloc_coherent(&pHba->pDev->dev, sg_size, &addr, GFP_KERNEL);
1779 			if(!p) {
1780 				printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
1781 						pHba->name,sg_size,i,sg_count);
1782 				rcode = -ENOMEM;
1783 				goto cleanup;
1784 			}
1785 			sg_list[sg_index++] = p; // sglist indexed with input frame, not our internal frame.
1786 			/* Copy in the user's SG buffer if necessary */
1787 			if(sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/) {
1788 				// sg_simple_element API is 32 bit
1789 				if (copy_from_user(p,(void __user *)(ulong)sg[i].addr_bus, sg_size)) {
1790 					printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n",pHba->name,i);
1791 					rcode = -EFAULT;
1792 					goto cleanup;
1793 				}
1794 			}
1795 			/* sg_simple_element API is 32 bit, but addr < 4GB */
1796 			sg[i].addr_bus = addr;
1797 		}
1798 	}
1799 
1800 	do {
1801 		/*
1802 		 * Stop any new commands from enterring the
1803 		 * controller while processing the ioctl
1804 		 */
1805 		if (pHba->host) {
1806 			scsi_block_requests(pHba->host);
1807 			spin_lock_irqsave(pHba->host->host_lock, flags);
1808 		}
1809 		rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER);
1810 		if (rcode != 0)
1811 			printk("adpt_i2o_passthru: post wait failed %d %p\n",
1812 					rcode, reply);
1813 		if (pHba->host) {
1814 			spin_unlock_irqrestore(pHba->host->host_lock, flags);
1815 			scsi_unblock_requests(pHba->host);
1816 		}
1817 	} while (rcode == -ETIMEDOUT);
1818 
1819 	if(rcode){
1820 		goto cleanup;
1821 	}
1822 
1823 	if(sg_offset) {
1824 	/* Copy back the Scatter Gather buffers back to user space */
1825 		u32 j;
1826 		// TODO add 64 bit API
1827 		struct sg_simple_element* sg;
1828 		int sg_size;
1829 
1830 		// re-acquire the original message to handle correctly the sg copy operation
1831 		memset(&msg, 0, MAX_MESSAGE_SIZE*4);
1832 		// get user msg size in u32s
1833 		if(get_user(size, &user_msg[0])){
1834 			rcode = -EFAULT;
1835 			goto cleanup;
1836 		}
1837 		size = size>>16;
1838 		size *= 4;
1839 		if (size > MAX_MESSAGE_SIZE) {
1840 			rcode = -EINVAL;
1841 			goto cleanup;
1842 		}
1843 		/* Copy in the user's I2O command */
1844 		if (copy_from_user (msg, user_msg, size)) {
1845 			rcode = -EFAULT;
1846 			goto cleanup;
1847 		}
1848 		sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
1849 
1850 		// TODO add 64 bit API
1851 		sg 	 = (struct sg_simple_element*)(msg + sg_offset);
1852 		for (j = 0; j < sg_count; j++) {
1853 			/* Copy out the SG list to user's buffer if necessary */
1854 			if(! (sg[j].flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR*/)) {
1855 				sg_size = sg[j].flag_count & 0xffffff;
1856 				// sg_simple_element API is 32 bit
1857 				if (copy_to_user((void __user *)(ulong)sg[j].addr_bus,sg_list[j], sg_size)) {
1858 					printk(KERN_WARNING"%s: Could not copy %p TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus);
1859 					rcode = -EFAULT;
1860 					goto cleanup;
1861 				}
1862 			}
1863 		}
1864 	}
1865 
1866 	/* Copy back the reply to user space */
1867 	if (reply_size) {
1868 		// we wrote our own values for context - now restore the user supplied ones
1869 		if(copy_from_user(reply+2, user_msg+2, sizeof(u32)*2)) {
1870 			printk(KERN_WARNING"%s: Could not copy message context FROM user\n",pHba->name);
1871 			rcode = -EFAULT;
1872 		}
1873 		if(copy_to_user(user_reply, reply, reply_size)) {
1874 			printk(KERN_WARNING"%s: Could not copy reply TO user\n",pHba->name);
1875 			rcode = -EFAULT;
1876 		}
1877 	}
1878 
1879 
1880 cleanup:
1881 	if (rcode != -ETIME && rcode != -EINTR) {
1882 		struct sg_simple_element *sg =
1883 				(struct sg_simple_element*) (msg +sg_offset);
1884 		kfree (reply);
1885 		while(sg_index) {
1886 			if(sg_list[--sg_index]) {
1887 				dma_free_coherent(&pHba->pDev->dev,
1888 					sg[sg_index].flag_count & 0xffffff,
1889 					sg_list[sg_index],
1890 					sg[sg_index].addr_bus);
1891 			}
1892 		}
1893 	}
1894 	return rcode;
1895 }
1896 
1897 #if defined __ia64__
adpt_ia64_info(sysInfo_S * si)1898 static void adpt_ia64_info(sysInfo_S* si)
1899 {
1900 	// This is all the info we need for now
1901 	// We will add more info as our new
1902 	// managmenent utility requires it
1903 	si->processorType = PROC_IA64;
1904 }
1905 #endif
1906 
1907 #if defined __sparc__
adpt_sparc_info(sysInfo_S * si)1908 static void adpt_sparc_info(sysInfo_S* si)
1909 {
1910 	// This is all the info we need for now
1911 	// We will add more info as our new
1912 	// managmenent utility requires it
1913 	si->processorType = PROC_ULTRASPARC;
1914 }
1915 #endif
1916 #if defined __alpha__
adpt_alpha_info(sysInfo_S * si)1917 static void adpt_alpha_info(sysInfo_S* si)
1918 {
1919 	// This is all the info we need for now
1920 	// We will add more info as our new
1921 	// managmenent utility requires it
1922 	si->processorType = PROC_ALPHA;
1923 }
1924 #endif
1925 
1926 #if defined __i386__
adpt_i386_info(sysInfo_S * si)1927 static void adpt_i386_info(sysInfo_S* si)
1928 {
1929 	// This is all the info we need for now
1930 	// We will add more info as our new
1931 	// managmenent utility requires it
1932 	switch (boot_cpu_data.x86) {
1933 	case CPU_386:
1934 		si->processorType = PROC_386;
1935 		break;
1936 	case CPU_486:
1937 		si->processorType = PROC_486;
1938 		break;
1939 	case CPU_586:
1940 		si->processorType = PROC_PENTIUM;
1941 		break;
1942 	default:  // Just in case
1943 		si->processorType = PROC_PENTIUM;
1944 		break;
1945 	}
1946 }
1947 #endif
1948 
1949 /*
1950  * This routine returns information about the system.  This does not effect
1951  * any logic and if the info is wrong - it doesn't matter.
1952  */
1953 
1954 /* Get all the info we can not get from kernel services */
adpt_system_info(void __user * buffer)1955 static int adpt_system_info(void __user *buffer)
1956 {
1957 	sysInfo_S si;
1958 
1959 	memset(&si, 0, sizeof(si));
1960 
1961 	si.osType = OS_LINUX;
1962 	si.osMajorVersion = 0;
1963 	si.osMinorVersion = 0;
1964 	si.osRevision = 0;
1965 	si.busType = SI_PCI_BUS;
1966 	si.processorFamily = DPTI_sig.dsProcessorFamily;
1967 
1968 #if defined __i386__
1969 	adpt_i386_info(&si);
1970 #elif defined (__ia64__)
1971 	adpt_ia64_info(&si);
1972 #elif defined(__sparc__)
1973 	adpt_sparc_info(&si);
1974 #elif defined (__alpha__)
1975 	adpt_alpha_info(&si);
1976 #else
1977 	si.processorType = 0xff ;
1978 #endif
1979 	if (copy_to_user(buffer, &si, sizeof(si))){
1980 		printk(KERN_WARNING"dpti: Could not copy buffer TO user\n");
1981 		return -EFAULT;
1982 	}
1983 
1984 	return 0;
1985 }
1986 
adpt_ioctl(struct inode * inode,struct file * file,uint cmd,ulong arg)1987 static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
1988 {
1989 	int minor;
1990 	int error = 0;
1991 	adpt_hba* pHba;
1992 	ulong flags = 0;
1993 	void __user *argp = (void __user *)arg;
1994 
1995 	minor = iminor(inode);
1996 	if (minor >= DPTI_MAX_HBA){
1997 		return -ENXIO;
1998 	}
1999 	mutex_lock(&adpt_configuration_lock);
2000 	for (pHba = hba_chain; pHba; pHba = pHba->next) {
2001 		if (pHba->unit == minor) {
2002 			break;	/* found adapter */
2003 		}
2004 	}
2005 	mutex_unlock(&adpt_configuration_lock);
2006 	if(pHba == NULL){
2007 		return -ENXIO;
2008 	}
2009 
2010 	while((volatile u32) pHba->state & DPTI_STATE_RESET )
2011 		schedule_timeout_uninterruptible(2);
2012 
2013 	switch (cmd) {
2014 	// TODO: handle 3 cases
2015 	case DPT_SIGNATURE:
2016 		if (copy_to_user(argp, &DPTI_sig, sizeof(DPTI_sig))) {
2017 			return -EFAULT;
2018 		}
2019 		break;
2020 	case I2OUSRCMD:
2021 		return adpt_i2o_passthru(pHba, argp);
2022 
2023 	case DPT_CTRLINFO:{
2024 		drvrHBAinfo_S HbaInfo;
2025 
2026 #define FLG_OSD_PCI_VALID 0x0001
2027 #define FLG_OSD_DMA	  0x0002
2028 #define FLG_OSD_I2O	  0x0004
2029 		memset(&HbaInfo, 0, sizeof(HbaInfo));
2030 		HbaInfo.drvrHBAnum = pHba->unit;
2031 		HbaInfo.baseAddr = (ulong) pHba->base_addr_phys;
2032 		HbaInfo.blinkState = adpt_read_blink_led(pHba);
2033 		HbaInfo.pciBusNum =  pHba->pDev->bus->number;
2034 		HbaInfo.pciDeviceNum=PCI_SLOT(pHba->pDev->devfn);
2035 		HbaInfo.Interrupt = pHba->pDev->irq;
2036 		HbaInfo.hbaFlags = FLG_OSD_PCI_VALID | FLG_OSD_DMA | FLG_OSD_I2O;
2037 		if(copy_to_user(argp, &HbaInfo, sizeof(HbaInfo))){
2038 			printk(KERN_WARNING"%s: Could not copy HbaInfo TO user\n",pHba->name);
2039 			return -EFAULT;
2040 		}
2041 		break;
2042 		}
2043 	case DPT_SYSINFO:
2044 		return adpt_system_info(argp);
2045 	case DPT_BLINKLED:{
2046 		u32 value;
2047 		value = (u32)adpt_read_blink_led(pHba);
2048 		if (copy_to_user(argp, &value, sizeof(value))) {
2049 			return -EFAULT;
2050 		}
2051 		break;
2052 		}
2053 	case I2ORESETCMD:
2054 		if(pHba->host)
2055 			spin_lock_irqsave(pHba->host->host_lock, flags);
2056 		adpt_hba_reset(pHba);
2057 		if(pHba->host)
2058 			spin_unlock_irqrestore(pHba->host->host_lock, flags);
2059 		break;
2060 	case I2ORESCANCMD:
2061 		adpt_rescan(pHba);
2062 		break;
2063 	default:
2064 		return -EINVAL;
2065 	}
2066 
2067 	return error;
2068 }
2069 
adpt_unlocked_ioctl(struct file * file,uint cmd,ulong arg)2070 static long adpt_unlocked_ioctl(struct file *file, uint cmd, ulong arg)
2071 {
2072 	struct inode *inode;
2073 	long ret;
2074 
2075 	inode = file_inode(file);
2076 
2077 	mutex_lock(&adpt_mutex);
2078 	ret = adpt_ioctl(inode, file, cmd, arg);
2079 	mutex_unlock(&adpt_mutex);
2080 
2081 	return ret;
2082 }
2083 
2084 #ifdef CONFIG_COMPAT
compat_adpt_ioctl(struct file * file,unsigned int cmd,unsigned long arg)2085 static long compat_adpt_ioctl(struct file *file,
2086 				unsigned int cmd, unsigned long arg)
2087 {
2088 	struct inode *inode;
2089 	long ret;
2090 
2091 	inode = file_inode(file);
2092 
2093 	mutex_lock(&adpt_mutex);
2094 
2095 	switch(cmd) {
2096 		case DPT_SIGNATURE:
2097 		case I2OUSRCMD:
2098 		case DPT_CTRLINFO:
2099 		case DPT_SYSINFO:
2100 		case DPT_BLINKLED:
2101 		case I2ORESETCMD:
2102 		case I2ORESCANCMD:
2103 		case (DPT_TARGET_BUSY & 0xFFFF):
2104 		case DPT_TARGET_BUSY:
2105 			ret = adpt_ioctl(inode, file, cmd, arg);
2106 			break;
2107 		default:
2108 			ret =  -ENOIOCTLCMD;
2109 	}
2110 
2111 	mutex_unlock(&adpt_mutex);
2112 
2113 	return ret;
2114 }
2115 #endif
2116 
adpt_isr(int irq,void * dev_id)2117 static irqreturn_t adpt_isr(int irq, void *dev_id)
2118 {
2119 	struct scsi_cmnd* cmd;
2120 	adpt_hba* pHba = dev_id;
2121 	u32 m;
2122 	void __iomem *reply;
2123 	u32 status=0;
2124 	u32 context;
2125 	ulong flags = 0;
2126 	int handled = 0;
2127 
2128 	if (pHba == NULL){
2129 		printk(KERN_WARNING"adpt_isr: NULL dev_id\n");
2130 		return IRQ_NONE;
2131 	}
2132 	if(pHba->host)
2133 		spin_lock_irqsave(pHba->host->host_lock, flags);
2134 
2135 	while( readl(pHba->irq_mask) & I2O_INTERRUPT_PENDING_B) {
2136 		m = readl(pHba->reply_port);
2137 		if(m == EMPTY_QUEUE){
2138 			// Try twice then give up
2139 			rmb();
2140 			m = readl(pHba->reply_port);
2141 			if(m == EMPTY_QUEUE){
2142 				// This really should not happen
2143 				printk(KERN_ERR"dpti: Could not get reply frame\n");
2144 				goto out;
2145 			}
2146 		}
2147 		if (pHba->reply_pool_pa <= m &&
2148 		    m < pHba->reply_pool_pa +
2149 			(pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4)) {
2150 			reply = (u8 *)pHba->reply_pool +
2151 						(m - pHba->reply_pool_pa);
2152 		} else {
2153 			/* Ick, we should *never* be here */
2154 			printk(KERN_ERR "dpti: reply frame not from pool\n");
2155 			reply = (u8 *)bus_to_virt(m);
2156 		}
2157 
2158 		if (readl(reply) & MSG_FAIL) {
2159 			u32 old_m = readl(reply+28);
2160 			void __iomem *msg;
2161 			u32 old_context;
2162 			PDEBUG("%s: Failed message\n",pHba->name);
2163 			if(old_m >= 0x100000){
2164 				printk(KERN_ERR"%s: Bad preserved MFA (%x)- dropping frame\n",pHba->name,old_m);
2165 				writel(m,pHba->reply_port);
2166 				continue;
2167 			}
2168 			// Transaction context is 0 in failed reply frame
2169 			msg = pHba->msg_addr_virt + old_m;
2170 			old_context = readl(msg+12);
2171 			writel(old_context, reply+12);
2172 			adpt_send_nop(pHba, old_m);
2173 		}
2174 		context = readl(reply+8);
2175 		if(context & 0x40000000){ // IOCTL
2176 			void *p = adpt_ioctl_from_context(pHba, readl(reply+12));
2177 			if( p != NULL) {
2178 				memcpy_fromio(p, reply, REPLY_FRAME_SIZE * 4);
2179 			}
2180 			// All IOCTLs will also be post wait
2181 		}
2182 		if(context & 0x80000000){ // Post wait message
2183 			status = readl(reply+16);
2184 			if(status  >> 24){
2185 				status &=  0xffff; /* Get detail status */
2186 			} else {
2187 				status = I2O_POST_WAIT_OK;
2188 			}
2189 			if(!(context & 0x40000000)) {
2190 				cmd = adpt_cmd_from_context(pHba,
2191 							readl(reply+12));
2192 				if(cmd != NULL) {
2193 					printk(KERN_WARNING"%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->name, cmd, context);
2194 				}
2195 			}
2196 			adpt_i2o_post_wait_complete(context, status);
2197 		} else { // SCSI message
2198 			cmd = adpt_cmd_from_context (pHba, readl(reply+12));
2199 			if(cmd != NULL){
2200 				scsi_dma_unmap(cmd);
2201 				if(cmd->serial_number != 0) { // If not timedout
2202 					adpt_i2o_to_scsi(reply, cmd);
2203 				}
2204 			}
2205 		}
2206 		writel(m, pHba->reply_port);
2207 		wmb();
2208 		rmb();
2209 	}
2210 	handled = 1;
2211 out:	if(pHba->host)
2212 		spin_unlock_irqrestore(pHba->host->host_lock, flags);
2213 	return IRQ_RETVAL(handled);
2214 }
2215 
adpt_scsi_to_i2o(adpt_hba * pHba,struct scsi_cmnd * cmd,struct adpt_device * d)2216 static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_device* d)
2217 {
2218 	int i;
2219 	u32 msg[MAX_MESSAGE_SIZE];
2220 	u32* mptr;
2221 	u32* lptr;
2222 	u32 *lenptr;
2223 	int direction;
2224 	int scsidir;
2225 	int nseg;
2226 	u32 len;
2227 	u32 reqlen;
2228 	s32 rcode;
2229 	dma_addr_t addr;
2230 
2231 	memset(msg, 0 , sizeof(msg));
2232 	len = scsi_bufflen(cmd);
2233 	direction = 0x00000000;
2234 
2235 	scsidir = 0x00000000;			// DATA NO XFER
2236 	if(len) {
2237 		/*
2238 		 * Set SCBFlags to indicate if data is being transferred
2239 		 * in or out, or no data transfer
2240 		 * Note:  Do not have to verify index is less than 0 since
2241 		 * cmd->cmnd[0] is an unsigned char
2242 		 */
2243 		switch(cmd->sc_data_direction){
2244 		case DMA_FROM_DEVICE:
2245 			scsidir  =0x40000000;	// DATA IN  (iop<--dev)
2246 			break;
2247 		case DMA_TO_DEVICE:
2248 			direction=0x04000000;	// SGL OUT
2249 			scsidir  =0x80000000;	// DATA OUT (iop-->dev)
2250 			break;
2251 		case DMA_NONE:
2252 			break;
2253 		case DMA_BIDIRECTIONAL:
2254 			scsidir  =0x40000000;	// DATA IN  (iop<--dev)
2255 			// Assume In - and continue;
2256 			break;
2257 		default:
2258 			printk(KERN_WARNING"%s: scsi opcode 0x%x not supported.\n",
2259 			     pHba->name, cmd->cmnd[0]);
2260 			cmd->result = (DID_OK <<16) | (INITIATOR_ERROR << 8);
2261 			cmd->scsi_done(cmd);
2262 			return 	0;
2263 		}
2264 	}
2265 	// msg[0] is set later
2266 	// I2O_CMD_SCSI_EXEC
2267 	msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid);
2268 	msg[2] = 0;
2269 	msg[3] = adpt_cmd_to_context(cmd);  /* Want SCSI control block back */
2270 	// Our cards use the transaction context as the tag for queueing
2271 	// Adaptec/DPT Private stuff
2272 	msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16);
2273 	msg[5] = d->tid;
2274 	/* Direction, disconnect ok | sense data | simple queue , CDBLen */
2275 	// I2O_SCB_FLAG_ENABLE_DISCONNECT |
2276 	// I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
2277 	// I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
2278 	msg[6] = scsidir|0x20a00000|cmd->cmd_len;
2279 
2280 	mptr=msg+7;
2281 
2282 	// Write SCSI command into the message - always 16 byte block
2283 	memset(mptr, 0,  16);
2284 	memcpy(mptr, cmd->cmnd, cmd->cmd_len);
2285 	mptr+=4;
2286 	lenptr=mptr++;		/* Remember me - fill in when we know */
2287 	if (dpt_dma64(pHba)) {
2288 		reqlen = 16;		// SINGLE SGE
2289 		*mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
2290 		*mptr++ = 1 << PAGE_SHIFT;
2291 	} else {
2292 		reqlen = 14;		// SINGLE SGE
2293 	}
2294 	/* Now fill in the SGList and command */
2295 
2296 	nseg = scsi_dma_map(cmd);
2297 	BUG_ON(nseg < 0);
2298 	if (nseg) {
2299 		struct scatterlist *sg;
2300 
2301 		len = 0;
2302 		scsi_for_each_sg(cmd, sg, nseg, i) {
2303 			lptr = mptr;
2304 			*mptr++ = direction|0x10000000|sg_dma_len(sg);
2305 			len+=sg_dma_len(sg);
2306 			addr = sg_dma_address(sg);
2307 			*mptr++ = dma_low(addr);
2308 			if (dpt_dma64(pHba))
2309 				*mptr++ = dma_high(addr);
2310 			/* Make this an end of list */
2311 			if (i == nseg - 1)
2312 				*lptr = direction|0xD0000000|sg_dma_len(sg);
2313 		}
2314 		reqlen = mptr - msg;
2315 		*lenptr = len;
2316 
2317 		if(cmd->underflow && len != cmd->underflow){
2318 			printk(KERN_WARNING"Cmd len %08X Cmd underflow %08X\n",
2319 				len, cmd->underflow);
2320 		}
2321 	} else {
2322 		*lenptr = len = 0;
2323 		reqlen = 12;
2324 	}
2325 
2326 	/* Stick the headers on */
2327 	msg[0] = reqlen<<16 | ((reqlen > 12) ? SGL_OFFSET_12 : SGL_OFFSET_0);
2328 
2329 	// Send it on it's way
2330 	rcode = adpt_i2o_post_this(pHba, msg, reqlen<<2);
2331 	if (rcode == 0) {
2332 		return 0;
2333 	}
2334 	return rcode;
2335 }
2336 
2337 
adpt_scsi_host_alloc(adpt_hba * pHba,struct scsi_host_template * sht)2338 static s32 adpt_scsi_host_alloc(adpt_hba* pHba, struct scsi_host_template *sht)
2339 {
2340 	struct Scsi_Host *host;
2341 
2342 	host = scsi_host_alloc(sht, sizeof(adpt_hba*));
2343 	if (host == NULL) {
2344 		printk("%s: scsi_host_alloc returned NULL\n", pHba->name);
2345 		return -1;
2346 	}
2347 	host->hostdata[0] = (unsigned long)pHba;
2348 	pHba->host = host;
2349 
2350 	host->irq = pHba->pDev->irq;
2351 	/* no IO ports, so don't have to set host->io_port and
2352 	 * host->n_io_port
2353 	 */
2354 	host->io_port = 0;
2355 	host->n_io_port = 0;
2356 				/* see comments in scsi_host.h */
2357 	host->max_id = 16;
2358 	host->max_lun = 256;
2359 	host->max_channel = pHba->top_scsi_channel + 1;
2360 	host->cmd_per_lun = 1;
2361 	host->unique_id = (u32)sys_tbl_pa + pHba->unit;
2362 	host->sg_tablesize = pHba->sg_tablesize;
2363 	host->can_queue = pHba->post_fifo_size;
2364 	host->use_cmd_list = 1;
2365 
2366 	return 0;
2367 }
2368 
2369 
adpt_i2o_to_scsi(void __iomem * reply,struct scsi_cmnd * cmd)2370 static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd)
2371 {
2372 	adpt_hba* pHba;
2373 	u32 hba_status;
2374 	u32 dev_status;
2375 	u32 reply_flags = readl(reply) & 0xff00; // Leave it shifted up 8 bits
2376 	// I know this would look cleaner if I just read bytes
2377 	// but the model I have been using for all the rest of the
2378 	// io is in 4 byte words - so I keep that model
2379 	u16 detailed_status = readl(reply+16) &0xffff;
2380 	dev_status = (detailed_status & 0xff);
2381 	hba_status = detailed_status >> 8;
2382 
2383 	// calculate resid for sg
2384 	scsi_set_resid(cmd, scsi_bufflen(cmd) - readl(reply+20));
2385 
2386 	pHba = (adpt_hba*) cmd->device->host->hostdata[0];
2387 
2388 	cmd->sense_buffer[0] = '\0';  // initialize sense valid flag to false
2389 
2390 	if(!(reply_flags & MSG_FAIL)) {
2391 		switch(detailed_status & I2O_SCSI_DSC_MASK) {
2392 		case I2O_SCSI_DSC_SUCCESS:
2393 			cmd->result = (DID_OK << 16);
2394 			// handle underflow
2395 			if (readl(reply+20) < cmd->underflow) {
2396 				cmd->result = (DID_ERROR <<16);
2397 				printk(KERN_WARNING"%s: SCSI CMD underflow\n",pHba->name);
2398 			}
2399 			break;
2400 		case I2O_SCSI_DSC_REQUEST_ABORTED:
2401 			cmd->result = (DID_ABORT << 16);
2402 			break;
2403 		case I2O_SCSI_DSC_PATH_INVALID:
2404 		case I2O_SCSI_DSC_DEVICE_NOT_PRESENT:
2405 		case I2O_SCSI_DSC_SELECTION_TIMEOUT:
2406 		case I2O_SCSI_DSC_COMMAND_TIMEOUT:
2407 		case I2O_SCSI_DSC_NO_ADAPTER:
2408 		case I2O_SCSI_DSC_RESOURCE_UNAVAILABLE:
2409 			printk(KERN_WARNING"%s: SCSI Timeout-Device (%d,%d,%llu) hba status=0x%x, dev status=0x%x, cmd=0x%x\n",
2410 				pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun, hba_status, dev_status, cmd->cmnd[0]);
2411 			cmd->result = (DID_TIME_OUT << 16);
2412 			break;
2413 		case I2O_SCSI_DSC_ADAPTER_BUSY:
2414 		case I2O_SCSI_DSC_BUS_BUSY:
2415 			cmd->result = (DID_BUS_BUSY << 16);
2416 			break;
2417 		case I2O_SCSI_DSC_SCSI_BUS_RESET:
2418 		case I2O_SCSI_DSC_BDR_MESSAGE_SENT:
2419 			cmd->result = (DID_RESET << 16);
2420 			break;
2421 		case I2O_SCSI_DSC_PARITY_ERROR_FAILURE:
2422 			printk(KERN_WARNING"%s: SCSI CMD parity error\n",pHba->name);
2423 			cmd->result = (DID_PARITY << 16);
2424 			break;
2425 		case I2O_SCSI_DSC_UNABLE_TO_ABORT:
2426 		case I2O_SCSI_DSC_COMPLETE_WITH_ERROR:
2427 		case I2O_SCSI_DSC_UNABLE_TO_TERMINATE:
2428 		case I2O_SCSI_DSC_MR_MESSAGE_RECEIVED:
2429 		case I2O_SCSI_DSC_AUTOSENSE_FAILED:
2430 		case I2O_SCSI_DSC_DATA_OVERRUN:
2431 		case I2O_SCSI_DSC_UNEXPECTED_BUS_FREE:
2432 		case I2O_SCSI_DSC_SEQUENCE_FAILURE:
2433 		case I2O_SCSI_DSC_REQUEST_LENGTH_ERROR:
2434 		case I2O_SCSI_DSC_PROVIDE_FAILURE:
2435 		case I2O_SCSI_DSC_REQUEST_TERMINATED:
2436 		case I2O_SCSI_DSC_IDE_MESSAGE_SENT:
2437 		case I2O_SCSI_DSC_UNACKNOWLEDGED_EVENT:
2438 		case I2O_SCSI_DSC_MESSAGE_RECEIVED:
2439 		case I2O_SCSI_DSC_INVALID_CDB:
2440 		case I2O_SCSI_DSC_LUN_INVALID:
2441 		case I2O_SCSI_DSC_SCSI_TID_INVALID:
2442 		case I2O_SCSI_DSC_FUNCTION_UNAVAILABLE:
2443 		case I2O_SCSI_DSC_NO_NEXUS:
2444 		case I2O_SCSI_DSC_CDB_RECEIVED:
2445 		case I2O_SCSI_DSC_LUN_ALREADY_ENABLED:
2446 		case I2O_SCSI_DSC_QUEUE_FROZEN:
2447 		case I2O_SCSI_DSC_REQUEST_INVALID:
2448 		default:
2449 			printk(KERN_WARNING"%s: SCSI error %0x-Device(%d,%d,%llu) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
2450 				pHba->name, detailed_status & I2O_SCSI_DSC_MASK, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
2451 			       hba_status, dev_status, cmd->cmnd[0]);
2452 			cmd->result = (DID_ERROR << 16);
2453 			break;
2454 		}
2455 
2456 		// copy over the request sense data if it was a check
2457 		// condition status
2458 		if (dev_status == SAM_STAT_CHECK_CONDITION) {
2459 			u32 len = min(SCSI_SENSE_BUFFERSIZE, 40);
2460 			// Copy over the sense data
2461 			memcpy_fromio(cmd->sense_buffer, (reply+28) , len);
2462 			if(cmd->sense_buffer[0] == 0x70 /* class 7 */ &&
2463 			   cmd->sense_buffer[2] == DATA_PROTECT ){
2464 				/* This is to handle an array failed */
2465 				cmd->result = (DID_TIME_OUT << 16);
2466 				printk(KERN_WARNING"%s: SCSI Data Protect-Device (%d,%d,%llu) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
2467 					pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
2468 					hba_status, dev_status, cmd->cmnd[0]);
2469 
2470 			}
2471 		}
2472 	} else {
2473 		/* In this condtion we could not talk to the tid
2474 		 * the card rejected it.  We should signal a retry
2475 		 * for a limitted number of retries.
2476 		 */
2477 		cmd->result = (DID_TIME_OUT << 16);
2478 		printk(KERN_WARNING"%s: I2O MSG_FAIL - Device (%d,%d,%llu) tid=%d, cmd=0x%x\n",
2479 			pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
2480 			((struct adpt_device*)(cmd->device->hostdata))->tid, cmd->cmnd[0]);
2481 	}
2482 
2483 	cmd->result |= (dev_status);
2484 
2485 	if(cmd->scsi_done != NULL){
2486 		cmd->scsi_done(cmd);
2487 	}
2488 	return cmd->result;
2489 }
2490 
2491 
adpt_rescan(adpt_hba * pHba)2492 static s32 adpt_rescan(adpt_hba* pHba)
2493 {
2494 	s32 rcode;
2495 	ulong flags = 0;
2496 
2497 	if(pHba->host)
2498 		spin_lock_irqsave(pHba->host->host_lock, flags);
2499 	if ((rcode=adpt_i2o_lct_get(pHba)) < 0)
2500 		goto out;
2501 	if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0)
2502 		goto out;
2503 	rcode = 0;
2504 out:	if(pHba->host)
2505 		spin_unlock_irqrestore(pHba->host->host_lock, flags);
2506 	return rcode;
2507 }
2508 
2509 
adpt_i2o_reparse_lct(adpt_hba * pHba)2510 static s32 adpt_i2o_reparse_lct(adpt_hba* pHba)
2511 {
2512 	int i;
2513 	int max;
2514 	int tid;
2515 	struct i2o_device *d;
2516 	i2o_lct *lct = pHba->lct;
2517 	u8 bus_no = 0;
2518 	s16 scsi_id;
2519 	u64 scsi_lun;
2520 	u32 buf[10]; // at least 8 u32's
2521 	struct adpt_device* pDev = NULL;
2522 	struct i2o_device* pI2o_dev = NULL;
2523 
2524 	if (lct == NULL) {
2525 		printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
2526 		return -1;
2527 	}
2528 
2529 	max = lct->table_size;
2530 	max -= 3;
2531 	max /= 9;
2532 
2533 	// Mark each drive as unscanned
2534 	for (d = pHba->devices; d; d = d->next) {
2535 		pDev =(struct adpt_device*) d->owner;
2536 		if(!pDev){
2537 			continue;
2538 		}
2539 		pDev->state |= DPTI_DEV_UNSCANNED;
2540 	}
2541 
2542 	printk(KERN_INFO "%s: LCT has %d entries.\n", pHba->name,max);
2543 
2544 	for(i=0;i<max;i++) {
2545 		if( lct->lct_entry[i].user_tid != 0xfff){
2546 			continue;
2547 		}
2548 
2549 		if( lct->lct_entry[i].class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
2550 		    lct->lct_entry[i].class_id == I2O_CLASS_SCSI_PERIPHERAL ||
2551 		    lct->lct_entry[i].class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
2552 			tid = lct->lct_entry[i].tid;
2553 			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
2554 				printk(KERN_ERR"%s: Could not query device\n",pHba->name);
2555 				continue;
2556 			}
2557 			bus_no = buf[0]>>16;
2558 			if (bus_no >= MAX_CHANNEL) {	/* Something wrong skip it */
2559 				printk(KERN_WARNING
2560 					"%s: Channel number %d out of range\n",
2561 					pHba->name, bus_no);
2562 				continue;
2563 			}
2564 
2565 			scsi_id = buf[1];
2566 			scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
2567 			pDev = pHba->channel[bus_no].device[scsi_id];
2568 			/* da lun */
2569 			while(pDev) {
2570 				if(pDev->scsi_lun == scsi_lun) {
2571 					break;
2572 				}
2573 				pDev = pDev->next_lun;
2574 			}
2575 			if(!pDev ) { // Something new add it
2576 				d = kmalloc(sizeof(struct i2o_device),
2577 					    GFP_ATOMIC);
2578 				if(d==NULL)
2579 				{
2580 					printk(KERN_CRIT "Out of memory for I2O device data.\n");
2581 					return -ENOMEM;
2582 				}
2583 
2584 				d->controller = pHba;
2585 				d->next = NULL;
2586 
2587 				memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
2588 
2589 				d->flags = 0;
2590 				adpt_i2o_report_hba_unit(pHba, d);
2591 				adpt_i2o_install_device(pHba, d);
2592 
2593 				pDev = pHba->channel[bus_no].device[scsi_id];
2594 				if( pDev == NULL){
2595 					pDev =
2596 					  kzalloc(sizeof(struct adpt_device),
2597 						  GFP_ATOMIC);
2598 					if(pDev == NULL) {
2599 						return -ENOMEM;
2600 					}
2601 					pHba->channel[bus_no].device[scsi_id] = pDev;
2602 				} else {
2603 					while (pDev->next_lun) {
2604 						pDev = pDev->next_lun;
2605 					}
2606 					pDev = pDev->next_lun =
2607 					  kzalloc(sizeof(struct adpt_device),
2608 						  GFP_ATOMIC);
2609 					if(pDev == NULL) {
2610 						return -ENOMEM;
2611 					}
2612 				}
2613 				pDev->tid = d->lct_data.tid;
2614 				pDev->scsi_channel = bus_no;
2615 				pDev->scsi_id = scsi_id;
2616 				pDev->scsi_lun = scsi_lun;
2617 				pDev->pI2o_dev = d;
2618 				d->owner = pDev;
2619 				pDev->type = (buf[0])&0xff;
2620 				pDev->flags = (buf[0]>>8)&0xff;
2621 				// Too late, SCSI system has made up it's mind, but what the hey ...
2622 				if(scsi_id > pHba->top_scsi_id){
2623 					pHba->top_scsi_id = scsi_id;
2624 				}
2625 				if(scsi_lun > pHba->top_scsi_lun){
2626 					pHba->top_scsi_lun = scsi_lun;
2627 				}
2628 				continue;
2629 			} // end of new i2o device
2630 
2631 			// We found an old device - check it
2632 			while(pDev) {
2633 				if(pDev->scsi_lun == scsi_lun) {
2634 					if(!scsi_device_online(pDev->pScsi_dev)) {
2635 						printk(KERN_WARNING"%s: Setting device (%d,%d,%llu) back online\n",
2636 								pHba->name,bus_no,scsi_id,scsi_lun);
2637 						if (pDev->pScsi_dev) {
2638 							scsi_device_set_state(pDev->pScsi_dev, SDEV_RUNNING);
2639 						}
2640 					}
2641 					d = pDev->pI2o_dev;
2642 					if(d->lct_data.tid != tid) { // something changed
2643 						pDev->tid = tid;
2644 						memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
2645 						if (pDev->pScsi_dev) {
2646 							pDev->pScsi_dev->changed = TRUE;
2647 							pDev->pScsi_dev->removable = TRUE;
2648 						}
2649 					}
2650 					// Found it - mark it scanned
2651 					pDev->state = DPTI_DEV_ONLINE;
2652 					break;
2653 				}
2654 				pDev = pDev->next_lun;
2655 			}
2656 		}
2657 	}
2658 	for (pI2o_dev = pHba->devices; pI2o_dev; pI2o_dev = pI2o_dev->next) {
2659 		pDev =(struct adpt_device*) pI2o_dev->owner;
2660 		if(!pDev){
2661 			continue;
2662 		}
2663 		// Drive offline drives that previously existed but could not be found
2664 		// in the LCT table
2665 		if (pDev->state & DPTI_DEV_UNSCANNED){
2666 			pDev->state = DPTI_DEV_OFFLINE;
2667 			printk(KERN_WARNING"%s: Device (%d,%d,%llu) offline\n",pHba->name,pDev->scsi_channel,pDev->scsi_id,pDev->scsi_lun);
2668 			if (pDev->pScsi_dev) {
2669 				scsi_device_set_state(pDev->pScsi_dev, SDEV_OFFLINE);
2670 			}
2671 		}
2672 	}
2673 	return 0;
2674 }
2675 
adpt_fail_posted_scbs(adpt_hba * pHba)2676 static void adpt_fail_posted_scbs(adpt_hba* pHba)
2677 {
2678 	struct scsi_cmnd* 	cmd = NULL;
2679 	struct scsi_device* 	d = NULL;
2680 
2681 	shost_for_each_device(d, pHba->host) {
2682 		unsigned long flags;
2683 		spin_lock_irqsave(&d->list_lock, flags);
2684 		list_for_each_entry(cmd, &d->cmd_list, list) {
2685 			if(cmd->serial_number == 0){
2686 				continue;
2687 			}
2688 			cmd->result = (DID_OK << 16) | (QUEUE_FULL <<1);
2689 			cmd->scsi_done(cmd);
2690 		}
2691 		spin_unlock_irqrestore(&d->list_lock, flags);
2692 	}
2693 }
2694 
2695 
2696 /*============================================================================
2697  *  Routines from i2o subsystem
2698  *============================================================================
2699  */
2700 
2701 
2702 
2703 /*
2704  *	Bring an I2O controller into HOLD state. See the spec.
2705  */
adpt_i2o_activate_hba(adpt_hba * pHba)2706 static int adpt_i2o_activate_hba(adpt_hba* pHba)
2707 {
2708 	int rcode;
2709 
2710 	if(pHba->initialized ) {
2711 		if (adpt_i2o_status_get(pHba) < 0) {
2712 			if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
2713 				printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
2714 				return rcode;
2715 			}
2716 			if (adpt_i2o_status_get(pHba) < 0) {
2717 				printk(KERN_INFO "HBA not responding.\n");
2718 				return -1;
2719 			}
2720 		}
2721 
2722 		if(pHba->status_block->iop_state == ADAPTER_STATE_FAULTED) {
2723 			printk(KERN_CRIT "%s: hardware fault\n", pHba->name);
2724 			return -1;
2725 		}
2726 
2727 		if (pHba->status_block->iop_state == ADAPTER_STATE_READY ||
2728 		    pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL ||
2729 		    pHba->status_block->iop_state == ADAPTER_STATE_HOLD ||
2730 		    pHba->status_block->iop_state == ADAPTER_STATE_FAILED) {
2731 			adpt_i2o_reset_hba(pHba);
2732 			if (adpt_i2o_status_get(pHba) < 0 || pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
2733 				printk(KERN_ERR "%s: Failed to initialize.\n", pHba->name);
2734 				return -1;
2735 			}
2736 		}
2737 	} else {
2738 		if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
2739 			printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
2740 			return rcode;
2741 		}
2742 
2743 	}
2744 
2745 	if (adpt_i2o_init_outbound_q(pHba) < 0) {
2746 		return -1;
2747 	}
2748 
2749 	/* In HOLD state */
2750 
2751 	if (adpt_i2o_hrt_get(pHba) < 0) {
2752 		return -1;
2753 	}
2754 
2755 	return 0;
2756 }
2757 
2758 /*
2759  *	Bring a controller online into OPERATIONAL state.
2760  */
2761 
adpt_i2o_online_hba(adpt_hba * pHba)2762 static int adpt_i2o_online_hba(adpt_hba* pHba)
2763 {
2764 	if (adpt_i2o_systab_send(pHba) < 0) {
2765 		adpt_i2o_delete_hba(pHba);
2766 		return -1;
2767 	}
2768 	/* In READY state */
2769 
2770 	if (adpt_i2o_enable_hba(pHba) < 0) {
2771 		adpt_i2o_delete_hba(pHba);
2772 		return -1;
2773 	}
2774 
2775 	/* In OPERATIONAL state  */
2776 	return 0;
2777 }
2778 
adpt_send_nop(adpt_hba * pHba,u32 m)2779 static s32 adpt_send_nop(adpt_hba*pHba,u32 m)
2780 {
2781 	u32 __iomem *msg;
2782 	ulong timeout = jiffies + 5*HZ;
2783 
2784 	while(m == EMPTY_QUEUE){
2785 		rmb();
2786 		m = readl(pHba->post_port);
2787 		if(m != EMPTY_QUEUE){
2788 			break;
2789 		}
2790 		if(time_after(jiffies,timeout)){
2791 			printk(KERN_ERR "%s: Timeout waiting for message frame!\n",pHba->name);
2792 			return 2;
2793 		}
2794 		schedule_timeout_uninterruptible(1);
2795 	}
2796 	msg = (u32 __iomem *)(pHba->msg_addr_virt + m);
2797 	writel( THREE_WORD_MSG_SIZE | SGL_OFFSET_0,&msg[0]);
2798 	writel( I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0,&msg[1]);
2799 	writel( 0,&msg[2]);
2800 	wmb();
2801 
2802 	writel(m, pHba->post_port);
2803 	wmb();
2804 	return 0;
2805 }
2806 
adpt_i2o_init_outbound_q(adpt_hba * pHba)2807 static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
2808 {
2809 	u8 *status;
2810 	dma_addr_t addr;
2811 	u32 __iomem *msg = NULL;
2812 	int i;
2813 	ulong timeout = jiffies + TMOUT_INITOUTBOUND*HZ;
2814 	u32 m;
2815 
2816 	do {
2817 		rmb();
2818 		m = readl(pHba->post_port);
2819 		if (m != EMPTY_QUEUE) {
2820 			break;
2821 		}
2822 
2823 		if(time_after(jiffies,timeout)){
2824 			printk(KERN_WARNING"%s: Timeout waiting for message frame\n",pHba->name);
2825 			return -ETIMEDOUT;
2826 		}
2827 		schedule_timeout_uninterruptible(1);
2828 	} while(m == EMPTY_QUEUE);
2829 
2830 	msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
2831 
2832 	status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
2833 	if (!status) {
2834 		adpt_send_nop(pHba, m);
2835 		printk(KERN_WARNING"%s: IOP reset failed - no free memory.\n",
2836 			pHba->name);
2837 		return -ENOMEM;
2838 	}
2839 	memset(status, 0, 4);
2840 
2841 	writel(EIGHT_WORD_MSG_SIZE| SGL_OFFSET_6, &msg[0]);
2842 	writel(I2O_CMD_OUTBOUND_INIT<<24 | HOST_TID<<12 | ADAPTER_TID, &msg[1]);
2843 	writel(0, &msg[2]);
2844 	writel(0x0106, &msg[3]);	/* Transaction context */
2845 	writel(4096, &msg[4]);		/* Host page frame size */
2846 	writel((REPLY_FRAME_SIZE)<<16|0x80, &msg[5]);	/* Outbound msg frame size and Initcode */
2847 	writel(0xD0000004, &msg[6]);		/* Simple SG LE, EOB */
2848 	writel((u32)addr, &msg[7]);
2849 
2850 	writel(m, pHba->post_port);
2851 	wmb();
2852 
2853 	// Wait for the reply status to come back
2854 	do {
2855 		if (*status) {
2856 			if (*status != 0x01 /*I2O_EXEC_OUTBOUND_INIT_IN_PROGRESS*/) {
2857 				break;
2858 			}
2859 		}
2860 		rmb();
2861 		if(time_after(jiffies,timeout)){
2862 			printk(KERN_WARNING"%s: Timeout Initializing\n",pHba->name);
2863 			/* We lose 4 bytes of "status" here, but we
2864 			   cannot free these because controller may
2865 			   awake and corrupt those bytes at any time */
2866 			/* dma_free_coherent(&pHba->pDev->dev, 4, status, addr); */
2867 			return -ETIMEDOUT;
2868 		}
2869 		schedule_timeout_uninterruptible(1);
2870 	} while (1);
2871 
2872 	// If the command was successful, fill the fifo with our reply
2873 	// message packets
2874 	if(*status != 0x04 /*I2O_EXEC_OUTBOUND_INIT_COMPLETE*/) {
2875 		dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
2876 		return -2;
2877 	}
2878 	dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
2879 
2880 	if(pHba->reply_pool != NULL) {
2881 		dma_free_coherent(&pHba->pDev->dev,
2882 			pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
2883 			pHba->reply_pool, pHba->reply_pool_pa);
2884 	}
2885 
2886 	pHba->reply_pool = dma_alloc_coherent(&pHba->pDev->dev,
2887 				pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
2888 				&pHba->reply_pool_pa, GFP_KERNEL);
2889 	if (!pHba->reply_pool) {
2890 		printk(KERN_ERR "%s: Could not allocate reply pool\n", pHba->name);
2891 		return -ENOMEM;
2892 	}
2893 	memset(pHba->reply_pool, 0 , pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4);
2894 
2895 	for(i = 0; i < pHba->reply_fifo_size; i++) {
2896 		writel(pHba->reply_pool_pa + (i * REPLY_FRAME_SIZE * 4),
2897 			pHba->reply_port);
2898 		wmb();
2899 	}
2900 	adpt_i2o_status_get(pHba);
2901 	return 0;
2902 }
2903 
2904 
2905 /*
2906  * I2O System Table.  Contains information about
2907  * all the IOPs in the system.  Used to inform IOPs
2908  * about each other's existence.
2909  *
2910  * sys_tbl_ver is the CurrentChangeIndicator that is
2911  * used by IOPs to track changes.
2912  */
2913 
2914 
2915 
adpt_i2o_status_get(adpt_hba * pHba)2916 static s32 adpt_i2o_status_get(adpt_hba* pHba)
2917 {
2918 	ulong timeout;
2919 	u32 m;
2920 	u32 __iomem *msg;
2921 	u8 *status_block=NULL;
2922 
2923 	if(pHba->status_block == NULL) {
2924 		pHba->status_block = dma_alloc_coherent(&pHba->pDev->dev,
2925 					sizeof(i2o_status_block),
2926 					&pHba->status_block_pa, GFP_KERNEL);
2927 		if(pHba->status_block == NULL) {
2928 			printk(KERN_ERR
2929 			"dpti%d: Get Status Block failed; Out of memory. \n",
2930 			pHba->unit);
2931 			return -ENOMEM;
2932 		}
2933 	}
2934 	memset(pHba->status_block, 0, sizeof(i2o_status_block));
2935 	status_block = (u8*)(pHba->status_block);
2936 	timeout = jiffies+TMOUT_GETSTATUS*HZ;
2937 	do {
2938 		rmb();
2939 		m = readl(pHba->post_port);
2940 		if (m != EMPTY_QUEUE) {
2941 			break;
2942 		}
2943 		if(time_after(jiffies,timeout)){
2944 			printk(KERN_ERR "%s: Timeout waiting for message !\n",
2945 					pHba->name);
2946 			return -ETIMEDOUT;
2947 		}
2948 		schedule_timeout_uninterruptible(1);
2949 	} while(m==EMPTY_QUEUE);
2950 
2951 
2952 	msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
2953 
2954 	writel(NINE_WORD_MSG_SIZE|SGL_OFFSET_0, &msg[0]);
2955 	writel(I2O_CMD_STATUS_GET<<24|HOST_TID<<12|ADAPTER_TID, &msg[1]);
2956 	writel(1, &msg[2]);
2957 	writel(0, &msg[3]);
2958 	writel(0, &msg[4]);
2959 	writel(0, &msg[5]);
2960 	writel( dma_low(pHba->status_block_pa), &msg[6]);
2961 	writel( dma_high(pHba->status_block_pa), &msg[7]);
2962 	writel(sizeof(i2o_status_block), &msg[8]); // 88 bytes
2963 
2964 	//post message
2965 	writel(m, pHba->post_port);
2966 	wmb();
2967 
2968 	while(status_block[87]!=0xff){
2969 		if(time_after(jiffies,timeout)){
2970 			printk(KERN_ERR"dpti%d: Get status timeout.\n",
2971 				pHba->unit);
2972 			return -ETIMEDOUT;
2973 		}
2974 		rmb();
2975 		schedule_timeout_uninterruptible(1);
2976 	}
2977 
2978 	// Set up our number of outbound and inbound messages
2979 	pHba->post_fifo_size = pHba->status_block->max_inbound_frames;
2980 	if (pHba->post_fifo_size > MAX_TO_IOP_MESSAGES) {
2981 		pHba->post_fifo_size = MAX_TO_IOP_MESSAGES;
2982 	}
2983 
2984 	pHba->reply_fifo_size = pHba->status_block->max_outbound_frames;
2985 	if (pHba->reply_fifo_size > MAX_FROM_IOP_MESSAGES) {
2986 		pHba->reply_fifo_size = MAX_FROM_IOP_MESSAGES;
2987 	}
2988 
2989 	// Calculate the Scatter Gather list size
2990 	if (dpt_dma64(pHba)) {
2991 		pHba->sg_tablesize
2992 		  = ((pHba->status_block->inbound_frame_size * 4
2993 		  - 14 * sizeof(u32))
2994 		  / (sizeof(struct sg_simple_element) + sizeof(u32)));
2995 	} else {
2996 		pHba->sg_tablesize
2997 		  = ((pHba->status_block->inbound_frame_size * 4
2998 		  - 12 * sizeof(u32))
2999 		  / sizeof(struct sg_simple_element));
3000 	}
3001 	if (pHba->sg_tablesize > SG_LIST_ELEMENTS) {
3002 		pHba->sg_tablesize = SG_LIST_ELEMENTS;
3003 	}
3004 
3005 
3006 #ifdef DEBUG
3007 	printk("dpti%d: State = ",pHba->unit);
3008 	switch(pHba->status_block->iop_state) {
3009 		case 0x01:
3010 			printk("INIT\n");
3011 			break;
3012 		case 0x02:
3013 			printk("RESET\n");
3014 			break;
3015 		case 0x04:
3016 			printk("HOLD\n");
3017 			break;
3018 		case 0x05:
3019 			printk("READY\n");
3020 			break;
3021 		case 0x08:
3022 			printk("OPERATIONAL\n");
3023 			break;
3024 		case 0x10:
3025 			printk("FAILED\n");
3026 			break;
3027 		case 0x11:
3028 			printk("FAULTED\n");
3029 			break;
3030 		default:
3031 			printk("%x (unknown!!)\n",pHba->status_block->iop_state);
3032 	}
3033 #endif
3034 	return 0;
3035 }
3036 
3037 /*
3038  * Get the IOP's Logical Configuration Table
3039  */
adpt_i2o_lct_get(adpt_hba * pHba)3040 static int adpt_i2o_lct_get(adpt_hba* pHba)
3041 {
3042 	u32 msg[8];
3043 	int ret;
3044 	u32 buf[16];
3045 
3046 	if ((pHba->lct_size == 0) || (pHba->lct == NULL)){
3047 		pHba->lct_size = pHba->status_block->expected_lct_size;
3048 	}
3049 	do {
3050 		if (pHba->lct == NULL) {
3051 			pHba->lct = dma_alloc_coherent(&pHba->pDev->dev,
3052 					pHba->lct_size, &pHba->lct_pa,
3053 					GFP_ATOMIC);
3054 			if(pHba->lct == NULL) {
3055 				printk(KERN_CRIT "%s: Lct Get failed. Out of memory.\n",
3056 					pHba->name);
3057 				return -ENOMEM;
3058 			}
3059 		}
3060 		memset(pHba->lct, 0, pHba->lct_size);
3061 
3062 		msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6;
3063 		msg[1] = I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID;
3064 		msg[2] = 0;
3065 		msg[3] = 0;
3066 		msg[4] = 0xFFFFFFFF;	/* All devices */
3067 		msg[5] = 0x00000000;	/* Report now */
3068 		msg[6] = 0xD0000000|pHba->lct_size;
3069 		msg[7] = (u32)pHba->lct_pa;
3070 
3071 		if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 360))) {
3072 			printk(KERN_ERR "%s: LCT Get failed (status=%#10x.\n",
3073 				pHba->name, ret);
3074 			printk(KERN_ERR"Adaptec: Error Reading Hardware.\n");
3075 			return ret;
3076 		}
3077 
3078 		if ((pHba->lct->table_size << 2) > pHba->lct_size) {
3079 			pHba->lct_size = pHba->lct->table_size << 2;
3080 			dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
3081 					pHba->lct, pHba->lct_pa);
3082 			pHba->lct = NULL;
3083 		}
3084 	} while (pHba->lct == NULL);
3085 
3086 	PDEBUG("%s: Hardware resource table read.\n", pHba->name);
3087 
3088 
3089 	// I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO;
3090 	if(adpt_i2o_query_scalar(pHba, 0 , 0x8000, -1, buf, sizeof(buf))>=0) {
3091 		pHba->FwDebugBufferSize = buf[1];
3092 		pHba->FwDebugBuffer_P = ioremap(pHba->base_addr_phys + buf[0],
3093 						pHba->FwDebugBufferSize);
3094 		if (pHba->FwDebugBuffer_P) {
3095 			pHba->FwDebugFlags_P     = pHba->FwDebugBuffer_P +
3096 							FW_DEBUG_FLAGS_OFFSET;
3097 			pHba->FwDebugBLEDvalue_P = pHba->FwDebugBuffer_P +
3098 							FW_DEBUG_BLED_OFFSET;
3099 			pHba->FwDebugBLEDflag_P  = pHba->FwDebugBLEDvalue_P + 1;
3100 			pHba->FwDebugStrLength_P = pHba->FwDebugBuffer_P +
3101 						FW_DEBUG_STR_LENGTH_OFFSET;
3102 			pHba->FwDebugBuffer_P += buf[2];
3103 			pHba->FwDebugFlags = 0;
3104 		}
3105 	}
3106 
3107 	return 0;
3108 }
3109 
adpt_i2o_build_sys_table(void)3110 static int adpt_i2o_build_sys_table(void)
3111 {
3112 	adpt_hba* pHba = hba_chain;
3113 	int count = 0;
3114 
3115 	if (sys_tbl)
3116 		dma_free_coherent(&pHba->pDev->dev, sys_tbl_len,
3117 					sys_tbl, sys_tbl_pa);
3118 
3119 	sys_tbl_len = sizeof(struct i2o_sys_tbl) +	// Header + IOPs
3120 				(hba_count) * sizeof(struct i2o_sys_tbl_entry);
3121 
3122 	sys_tbl = dma_alloc_coherent(&pHba->pDev->dev,
3123 				sys_tbl_len, &sys_tbl_pa, GFP_KERNEL);
3124 	if (!sys_tbl) {
3125 		printk(KERN_WARNING "SysTab Set failed. Out of memory.\n");
3126 		return -ENOMEM;
3127 	}
3128 	memset(sys_tbl, 0, sys_tbl_len);
3129 
3130 	sys_tbl->num_entries = hba_count;
3131 	sys_tbl->version = I2OVERSION;
3132 	sys_tbl->change_ind = sys_tbl_ind++;
3133 
3134 	for(pHba = hba_chain; pHba; pHba = pHba->next) {
3135 		u64 addr;
3136 		// Get updated Status Block so we have the latest information
3137 		if (adpt_i2o_status_get(pHba)) {
3138 			sys_tbl->num_entries--;
3139 			continue; // try next one
3140 		}
3141 
3142 		sys_tbl->iops[count].org_id = pHba->status_block->org_id;
3143 		sys_tbl->iops[count].iop_id = pHba->unit + 2;
3144 		sys_tbl->iops[count].seg_num = 0;
3145 		sys_tbl->iops[count].i2o_version = pHba->status_block->i2o_version;
3146 		sys_tbl->iops[count].iop_state = pHba->status_block->iop_state;
3147 		sys_tbl->iops[count].msg_type = pHba->status_block->msg_type;
3148 		sys_tbl->iops[count].frame_size = pHba->status_block->inbound_frame_size;
3149 		sys_tbl->iops[count].last_changed = sys_tbl_ind - 1; // ??
3150 		sys_tbl->iops[count].iop_capabilities = pHba->status_block->iop_capabilities;
3151 		addr = pHba->base_addr_phys + 0x40;
3152 		sys_tbl->iops[count].inbound_low = dma_low(addr);
3153 		sys_tbl->iops[count].inbound_high = dma_high(addr);
3154 
3155 		count++;
3156 	}
3157 
3158 #ifdef DEBUG
3159 {
3160 	u32 *table = (u32*)sys_tbl;
3161 	printk(KERN_DEBUG"sys_tbl_len=%d in 32bit words\n",(sys_tbl_len >>2));
3162 	for(count = 0; count < (sys_tbl_len >>2); count++) {
3163 		printk(KERN_INFO "sys_tbl[%d] = %0#10x\n",
3164 			count, table[count]);
3165 	}
3166 }
3167 #endif
3168 
3169 	return 0;
3170 }
3171 
3172 
3173 /*
3174  *	 Dump the information block associated with a given unit (TID)
3175  */
3176 
adpt_i2o_report_hba_unit(adpt_hba * pHba,struct i2o_device * d)3177 static void adpt_i2o_report_hba_unit(adpt_hba* pHba, struct i2o_device *d)
3178 {
3179 	char buf[64];
3180 	int unit = d->lct_data.tid;
3181 
3182 	printk(KERN_INFO "TID %3.3d ", unit);
3183 
3184 	if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 3, buf, 16)>=0)
3185 	{
3186 		buf[16]=0;
3187 		printk(" Vendor: %-12.12s", buf);
3188 	}
3189 	if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 4, buf, 16)>=0)
3190 	{
3191 		buf[16]=0;
3192 		printk(" Device: %-12.12s", buf);
3193 	}
3194 	if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 6, buf, 8)>=0)
3195 	{
3196 		buf[8]=0;
3197 		printk(" Rev: %-12.12s\n", buf);
3198 	}
3199 #ifdef DEBUG
3200 	 printk(KERN_INFO "\tClass: %.21s\n", adpt_i2o_get_class_name(d->lct_data.class_id));
3201 	 printk(KERN_INFO "\tSubclass: 0x%04X\n", d->lct_data.sub_class);
3202 	 printk(KERN_INFO "\tFlags: ");
3203 
3204 	 if(d->lct_data.device_flags&(1<<0))
3205 		  printk("C");	     // ConfigDialog requested
3206 	 if(d->lct_data.device_flags&(1<<1))
3207 		  printk("U");	     // Multi-user capable
3208 	 if(!(d->lct_data.device_flags&(1<<4)))
3209 		  printk("P");	     // Peer service enabled!
3210 	 if(!(d->lct_data.device_flags&(1<<5)))
3211 		  printk("M");	     // Mgmt service enabled!
3212 	 printk("\n");
3213 #endif
3214 }
3215 
3216 #ifdef DEBUG
3217 /*
3218  *	Do i2o class name lookup
3219  */
adpt_i2o_get_class_name(int class)3220 static const char *adpt_i2o_get_class_name(int class)
3221 {
3222 	int idx = 16;
3223 	static char *i2o_class_name[] = {
3224 		"Executive",
3225 		"Device Driver Module",
3226 		"Block Device",
3227 		"Tape Device",
3228 		"LAN Interface",
3229 		"WAN Interface",
3230 		"Fibre Channel Port",
3231 		"Fibre Channel Device",
3232 		"SCSI Device",
3233 		"ATE Port",
3234 		"ATE Device",
3235 		"Floppy Controller",
3236 		"Floppy Device",
3237 		"Secondary Bus Port",
3238 		"Peer Transport Agent",
3239 		"Peer Transport",
3240 		"Unknown"
3241 	};
3242 
3243 	switch(class&0xFFF) {
3244 	case I2O_CLASS_EXECUTIVE:
3245 		idx = 0; break;
3246 	case I2O_CLASS_DDM:
3247 		idx = 1; break;
3248 	case I2O_CLASS_RANDOM_BLOCK_STORAGE:
3249 		idx = 2; break;
3250 	case I2O_CLASS_SEQUENTIAL_STORAGE:
3251 		idx = 3; break;
3252 	case I2O_CLASS_LAN:
3253 		idx = 4; break;
3254 	case I2O_CLASS_WAN:
3255 		idx = 5; break;
3256 	case I2O_CLASS_FIBRE_CHANNEL_PORT:
3257 		idx = 6; break;
3258 	case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
3259 		idx = 7; break;
3260 	case I2O_CLASS_SCSI_PERIPHERAL:
3261 		idx = 8; break;
3262 	case I2O_CLASS_ATE_PORT:
3263 		idx = 9; break;
3264 	case I2O_CLASS_ATE_PERIPHERAL:
3265 		idx = 10; break;
3266 	case I2O_CLASS_FLOPPY_CONTROLLER:
3267 		idx = 11; break;
3268 	case I2O_CLASS_FLOPPY_DEVICE:
3269 		idx = 12; break;
3270 	case I2O_CLASS_BUS_ADAPTER_PORT:
3271 		idx = 13; break;
3272 	case I2O_CLASS_PEER_TRANSPORT_AGENT:
3273 		idx = 14; break;
3274 	case I2O_CLASS_PEER_TRANSPORT:
3275 		idx = 15; break;
3276 	}
3277 	return i2o_class_name[idx];
3278 }
3279 #endif
3280 
3281 
adpt_i2o_hrt_get(adpt_hba * pHba)3282 static s32 adpt_i2o_hrt_get(adpt_hba* pHba)
3283 {
3284 	u32 msg[6];
3285 	int ret, size = sizeof(i2o_hrt);
3286 
3287 	do {
3288 		if (pHba->hrt == NULL) {
3289 			pHba->hrt = dma_alloc_coherent(&pHba->pDev->dev,
3290 					size, &pHba->hrt_pa, GFP_KERNEL);
3291 			if (pHba->hrt == NULL) {
3292 				printk(KERN_CRIT "%s: Hrt Get failed; Out of memory.\n", pHba->name);
3293 				return -ENOMEM;
3294 			}
3295 		}
3296 
3297 		msg[0]= SIX_WORD_MSG_SIZE| SGL_OFFSET_4;
3298 		msg[1]= I2O_CMD_HRT_GET<<24 | HOST_TID<<12 | ADAPTER_TID;
3299 		msg[2]= 0;
3300 		msg[3]= 0;
3301 		msg[4]= (0xD0000000 | size);    /* Simple transaction */
3302 		msg[5]= (u32)pHba->hrt_pa;	/* Dump it here */
3303 
3304 		if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg),20))) {
3305 			printk(KERN_ERR "%s: Unable to get HRT (status=%#10x)\n", pHba->name, ret);
3306 			return ret;
3307 		}
3308 
3309 		if (pHba->hrt->num_entries * pHba->hrt->entry_len << 2 > size) {
3310 			int newsize = pHba->hrt->num_entries * pHba->hrt->entry_len << 2;
3311 			dma_free_coherent(&pHba->pDev->dev, size,
3312 				pHba->hrt, pHba->hrt_pa);
3313 			size = newsize;
3314 			pHba->hrt = NULL;
3315 		}
3316 	} while(pHba->hrt == NULL);
3317 	return 0;
3318 }
3319 
3320 /*
3321  *	 Query one scalar group value or a whole scalar group.
3322  */
adpt_i2o_query_scalar(adpt_hba * pHba,int tid,int group,int field,void * buf,int buflen)3323 static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
3324 			int group, int field, void *buf, int buflen)
3325 {
3326 	u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field };
3327 	u8 *opblk_va;
3328 	dma_addr_t opblk_pa;
3329 	u8 *resblk_va;
3330 	dma_addr_t resblk_pa;
3331 
3332 	int size;
3333 
3334 	/* 8 bytes for header */
3335 	resblk_va = dma_alloc_coherent(&pHba->pDev->dev,
3336 			sizeof(u8) * (8 + buflen), &resblk_pa, GFP_KERNEL);
3337 	if (resblk_va == NULL) {
3338 		printk(KERN_CRIT "%s: query scalar failed; Out of memory.\n", pHba->name);
3339 		return -ENOMEM;
3340 	}
3341 
3342 	opblk_va = dma_alloc_coherent(&pHba->pDev->dev,
3343 			sizeof(opblk), &opblk_pa, GFP_KERNEL);
3344 	if (opblk_va == NULL) {
3345 		dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3346 			resblk_va, resblk_pa);
3347 		printk(KERN_CRIT "%s: query operatio failed; Out of memory.\n",
3348 			pHba->name);
3349 		return -ENOMEM;
3350 	}
3351 	if (field == -1)  		/* whole group */
3352 			opblk[4] = -1;
3353 
3354 	memcpy(opblk_va, opblk, sizeof(opblk));
3355 	size = adpt_i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, pHba, tid,
3356 		opblk_va, opblk_pa, sizeof(opblk),
3357 		resblk_va, resblk_pa, sizeof(u8)*(8+buflen));
3358 	dma_free_coherent(&pHba->pDev->dev, sizeof(opblk), opblk_va, opblk_pa);
3359 	if (size == -ETIME) {
3360 		dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3361 							resblk_va, resblk_pa);
3362 		printk(KERN_WARNING "%s: issue params failed; Timed out.\n", pHba->name);
3363 		return -ETIME;
3364 	} else if (size == -EINTR) {
3365 		dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3366 							resblk_va, resblk_pa);
3367 		printk(KERN_WARNING "%s: issue params failed; Interrupted.\n", pHba->name);
3368 		return -EINTR;
3369 	}
3370 
3371 	memcpy(buf, resblk_va+8, buflen);  /* cut off header */
3372 
3373 	dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3374 						resblk_va, resblk_pa);
3375 	if (size < 0)
3376 		return size;
3377 
3378 	return buflen;
3379 }
3380 
3381 
3382 /*	Issue UTIL_PARAMS_GET or UTIL_PARAMS_SET
3383  *
3384  *	This function can be used for all UtilParamsGet/Set operations.
3385  *	The OperationBlock is given in opblk-buffer,
3386  *	and results are returned in resblk-buffer.
3387  *	Note that the minimum sized resblk is 8 bytes and contains
3388  *	ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
3389  */
adpt_i2o_issue_params(int cmd,adpt_hba * pHba,int tid,void * opblk_va,dma_addr_t opblk_pa,int oplen,void * resblk_va,dma_addr_t resblk_pa,int reslen)3390 static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid,
3391 		  void *opblk_va,  dma_addr_t opblk_pa, int oplen,
3392 		void *resblk_va, dma_addr_t resblk_pa, int reslen)
3393 {
3394 	u32 msg[9];
3395 	u32 *res = (u32 *)resblk_va;
3396 	int wait_status;
3397 
3398 	msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_5;
3399 	msg[1] = cmd << 24 | HOST_TID << 12 | tid;
3400 	msg[2] = 0;
3401 	msg[3] = 0;
3402 	msg[4] = 0;
3403 	msg[5] = 0x54000000 | oplen;	/* OperationBlock */
3404 	msg[6] = (u32)opblk_pa;
3405 	msg[7] = 0xD0000000 | reslen;	/* ResultBlock */
3406 	msg[8] = (u32)resblk_pa;
3407 
3408 	if ((wait_status = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) {
3409 		printk("adpt_i2o_issue_params: post_wait failed (%p)\n", resblk_va);
3410    		return wait_status; 	/* -DetailedStatus */
3411 	}
3412 
3413 	if (res[1]&0x00FF0000) { 	/* BlockStatus != SUCCESS */
3414 		printk(KERN_WARNING "%s: %s - Error:\n  ErrorInfoSize = 0x%02x, "
3415 			"BlockStatus = 0x%02x, BlockSize = 0x%04x\n",
3416 			pHba->name,
3417 			(cmd == I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET"
3418 							 : "PARAMS_GET",
3419 			res[1]>>24, (res[1]>>16)&0xFF, res[1]&0xFFFF);
3420 		return -((res[1] >> 16) & 0xFF); /* -BlockStatus */
3421 	}
3422 
3423 	 return 4 + ((res[1] & 0x0000FFFF) << 2); /* bytes used in resblk */
3424 }
3425 
3426 
adpt_i2o_quiesce_hba(adpt_hba * pHba)3427 static s32 adpt_i2o_quiesce_hba(adpt_hba* pHba)
3428 {
3429 	u32 msg[4];
3430 	int ret;
3431 
3432 	adpt_i2o_status_get(pHba);
3433 
3434 	/* SysQuiesce discarded if IOP not in READY or OPERATIONAL state */
3435 
3436 	if((pHba->status_block->iop_state != ADAPTER_STATE_READY) &&
3437    	   (pHba->status_block->iop_state != ADAPTER_STATE_OPERATIONAL)){
3438 		return 0;
3439 	}
3440 
3441 	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
3442 	msg[1] = I2O_CMD_SYS_QUIESCE<<24|HOST_TID<<12|ADAPTER_TID;
3443 	msg[2] = 0;
3444 	msg[3] = 0;
3445 
3446 	if((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
3447 		printk(KERN_INFO"dpti%d: Unable to quiesce (status=%#x).\n",
3448 				pHba->unit, -ret);
3449 	} else {
3450 		printk(KERN_INFO"dpti%d: Quiesced.\n",pHba->unit);
3451 	}
3452 
3453 	adpt_i2o_status_get(pHba);
3454 	return ret;
3455 }
3456 
3457 
3458 /*
3459  * Enable IOP. Allows the IOP to resume external operations.
3460  */
adpt_i2o_enable_hba(adpt_hba * pHba)3461 static int adpt_i2o_enable_hba(adpt_hba* pHba)
3462 {
3463 	u32 msg[4];
3464 	int ret;
3465 
3466 	adpt_i2o_status_get(pHba);
3467 	if(!pHba->status_block){
3468 		return -ENOMEM;
3469 	}
3470 	/* Enable only allowed on READY state */
3471 	if(pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL)
3472 		return 0;
3473 
3474 	if(pHba->status_block->iop_state != ADAPTER_STATE_READY)
3475 		return -EINVAL;
3476 
3477 	msg[0]=FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
3478 	msg[1]=I2O_CMD_SYS_ENABLE<<24|HOST_TID<<12|ADAPTER_TID;
3479 	msg[2]= 0;
3480 	msg[3]= 0;
3481 
3482 	if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
3483 		printk(KERN_WARNING"%s: Could not enable (status=%#10x).\n",
3484 			pHba->name, ret);
3485 	} else {
3486 		PDEBUG("%s: Enabled.\n", pHba->name);
3487 	}
3488 
3489 	adpt_i2o_status_get(pHba);
3490 	return ret;
3491 }
3492 
3493 
adpt_i2o_systab_send(adpt_hba * pHba)3494 static int adpt_i2o_systab_send(adpt_hba* pHba)
3495 {
3496 	 u32 msg[12];
3497 	 int ret;
3498 
3499 	msg[0] = I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6;
3500 	msg[1] = I2O_CMD_SYS_TAB_SET<<24 | HOST_TID<<12 | ADAPTER_TID;
3501 	msg[2] = 0;
3502 	msg[3] = 0;
3503 	msg[4] = (0<<16) | ((pHba->unit+2) << 12); /* Host 0 IOP ID (unit + 2) */
3504 	msg[5] = 0;				   /* Segment 0 */
3505 
3506 	/*
3507 	 * Provide three SGL-elements:
3508 	 * System table (SysTab), Private memory space declaration and
3509 	 * Private i/o space declaration
3510 	 */
3511 	msg[6] = 0x54000000 | sys_tbl_len;
3512 	msg[7] = (u32)sys_tbl_pa;
3513 	msg[8] = 0x54000000 | 0;
3514 	msg[9] = 0;
3515 	msg[10] = 0xD4000000 | 0;
3516 	msg[11] = 0;
3517 
3518 	if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 120))) {
3519 		printk(KERN_INFO "%s: Unable to set SysTab (status=%#10x).\n",
3520 			pHba->name, ret);
3521 	}
3522 #ifdef DEBUG
3523 	else {
3524 		PINFO("%s: SysTab set.\n", pHba->name);
3525 	}
3526 #endif
3527 
3528 	return ret;
3529  }
3530 
3531 
3532 /*============================================================================
3533  *
3534  *============================================================================
3535  */
3536 
3537 
3538 #ifdef UARTDELAY
3539 
adpt_delay(int millisec)3540 static static void adpt_delay(int millisec)
3541 {
3542 	int i;
3543 	for (i = 0; i < millisec; i++) {
3544 		udelay(1000);	/* delay for one millisecond */
3545 	}
3546 }
3547 
3548 #endif
3549 
3550 static struct scsi_host_template driver_template = {
3551 	.module			= THIS_MODULE,
3552 	.name			= "dpt_i2o",
3553 	.proc_name		= "dpt_i2o",
3554 	.show_info		= adpt_show_info,
3555 	.info			= adpt_info,
3556 	.queuecommand		= adpt_queue,
3557 	.eh_abort_handler	= adpt_abort,
3558 	.eh_device_reset_handler = adpt_device_reset,
3559 	.eh_bus_reset_handler	= adpt_bus_reset,
3560 	.eh_host_reset_handler	= adpt_reset,
3561 	.bios_param		= adpt_bios_param,
3562 	.slave_configure	= adpt_slave_configure,
3563 	.can_queue		= MAX_TO_IOP_MESSAGES,
3564 	.this_id		= 7,
3565 	.cmd_per_lun		= 1,
3566 	.use_clustering		= ENABLE_CLUSTERING,
3567 };
3568 
adpt_init(void)3569 static int __init adpt_init(void)
3570 {
3571 	int		error;
3572 	adpt_hba	*pHba, *next;
3573 
3574 	printk("Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n");
3575 
3576 	error = adpt_detect(&driver_template);
3577 	if (error < 0)
3578 		return error;
3579 	if (hba_chain == NULL)
3580 		return -ENODEV;
3581 
3582 	for (pHba = hba_chain; pHba; pHba = pHba->next) {
3583 		error = scsi_add_host(pHba->host, &pHba->pDev->dev);
3584 		if (error)
3585 			goto fail;
3586 		scsi_scan_host(pHba->host);
3587 	}
3588 	return 0;
3589 fail:
3590 	for (pHba = hba_chain; pHba; pHba = next) {
3591 		next = pHba->next;
3592 		scsi_remove_host(pHba->host);
3593 	}
3594 	return error;
3595 }
3596 
adpt_exit(void)3597 static void __exit adpt_exit(void)
3598 {
3599 	adpt_hba	*pHba, *next;
3600 
3601 	for (pHba = hba_chain; pHba; pHba = pHba->next)
3602 		scsi_remove_host(pHba->host);
3603 	for (pHba = hba_chain; pHba; pHba = next) {
3604 		next = pHba->next;
3605 		adpt_release(pHba->host);
3606 	}
3607 }
3608 
3609 module_init(adpt_init);
3610 module_exit(adpt_exit);
3611 
3612 MODULE_LICENSE("GPL");
3613