/* esp_scsi.c: ESP SCSI driver.
 *
 * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/completion.h>
#include <linux/kallsyms.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/irqreturn.h>

#include <asm/irq.h>
#include <asm/io.h>
#include <asm/dma.h>

#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_transport_spi.h>

#include "esp_scsi.h"

#define DRV_MODULE_NAME		"esp"
#define PFX DRV_MODULE_NAME	": "
#define DRV_VERSION		"2.000"
#define DRV_MODULE_RELDATE	"April 19, 2007"

/* SCSI bus reset settle time in seconds.  */
static int esp_bus_reset_settle = 3;

static u32 esp_debug;
#define ESP_DEBUG_INTR		0x00000001
#define ESP_DEBUG_SCSICMD	0x00000002
#define ESP_DEBUG_RESET		0x00000004
#define ESP_DEBUG_MSGIN		0x00000008
#define ESP_DEBUG_MSGOUT	0x00000010
#define ESP_DEBUG_CMDDONE	0x00000020
#define ESP_DEBUG_DISCONNECT	0x00000040
#define ESP_DEBUG_DATASTART	0x00000080
#define ESP_DEBUG_DATADONE	0x00000100
#define ESP_DEBUG_RECONNECT	0x00000200
#define ESP_DEBUG_AUTOSENSE	0x00000400
#define ESP_DEBUG_EVENT		0x00000800
#define ESP_DEBUG_COMMAND	0x00001000
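
/* These bits may be OR-ed together into esp_debug to enable several
 * classes of logging at once; for example, ESP_DEBUG_INTR |
 * ESP_DEBUG_EVENT (0x00000801) traces both interrupt entry and the
 * event state machine below.
 */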

#define esp_log_intr(f, a...) \
do {	if (esp_debug & ESP_DEBUG_INTR) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a);	\
} while (0)

#define esp_log_reset(f, a...) \
do {	if (esp_debug & ESP_DEBUG_RESET) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a);	\
} while (0)

#define esp_log_msgin(f, a...) \
do {	if (esp_debug & ESP_DEBUG_MSGIN) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a);	\
} while (0)

#define esp_log_msgout(f, a...) \
do {	if (esp_debug & ESP_DEBUG_MSGOUT) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a);	\
} while (0)

#define esp_log_cmddone(f, a...) \
do {	if (esp_debug & ESP_DEBUG_CMDDONE) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a);	\
} while (0)

#define esp_log_disconnect(f, a...) \
do {	if (esp_debug & ESP_DEBUG_DISCONNECT) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a);	\
} while (0)

#define esp_log_datastart(f, a...) \
do {	if (esp_debug & ESP_DEBUG_DATASTART) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a);	\
} while (0)

#define esp_log_datadone(f, a...) \
do {	if (esp_debug & ESP_DEBUG_DATADONE) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a);	\
} while (0)

#define esp_log_reconnect(f, a...) \
do {	if (esp_debug & ESP_DEBUG_RECONNECT) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a);	\
} while (0)

#define esp_log_autosense(f, a...) \
do {	if (esp_debug & ESP_DEBUG_AUTOSENSE) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a);	\
} while (0)

#define esp_log_event(f, a...) \
do {	if (esp_debug & ESP_DEBUG_EVENT)	\
		shost_printk(KERN_DEBUG, esp->host, f, ## a);	\
} while (0)

#define esp_log_command(f, a...) \
do {	if (esp_debug & ESP_DEBUG_COMMAND)	\
		shost_printk(KERN_DEBUG, esp->host, f, ## a);	\
} while (0)

#define esp_read8(REG)		esp->ops->esp_read8(esp, REG)
#define esp_write8(VAL, REG)	esp->ops->esp_write8(esp, VAL, REG)

static void esp_log_fill_regs(struct esp *esp,
			      struct esp_event_ent *p)
{
	p->sreg = esp->sreg;
	p->seqreg = esp->seqreg;
	p->sreg2 = esp->sreg2;
	p->ireg = esp->ireg;
	p->select_state = esp->select_state;
	p->event = esp->event;
}

void scsi_esp_cmd(struct esp *esp, u8 val)
{
	struct esp_event_ent *p;
	int idx = esp->esp_event_cur;

	p = &esp->esp_event_log[idx];
	p->type = ESP_EVENT_TYPE_CMD;
	p->val = val;
	esp_log_fill_regs(esp, p);

	esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);

	esp_log_command("cmd[%02x]\n", val);
	esp_write8(val, ESP_CMD);
}
EXPORT_SYMBOL(scsi_esp_cmd);

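/* A minimal sketch of how the event log above wraps, assuming (as the
 * mask requires) that ESP_EVENT_LOG_SZ is a power of two:
 *
 *	int idx = esp->esp_event_cur;		   // e.g. ESP_EVENT_LOG_SZ - 1
 *	idx = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);  // wraps back to 0
 *
 * esp_event() below advances the cursor the same way, so
 * esp_dump_cmd_log() can replay the most recent entries in order.
 */
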
static void esp_send_dma_cmd(struct esp *esp, int len, int max_len, int cmd)
{
	if (esp->flags & ESP_FLAG_USE_FIFO) {
		int i;

		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		for (i = 0; i < len; i++)
			esp_write8(esp->command_block[i], ESP_FDATA);
		scsi_esp_cmd(esp, cmd);
	} else {
		if (esp->rev == FASHME)
			scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		cmd |= ESP_CMD_DMA;
		esp->ops->send_dma_cmd(esp, esp->command_block_dma,
				       len, max_len, 0, cmd);
	}
}

static void esp_event(struct esp *esp, u8 val)
{
	struct esp_event_ent *p;
	int idx = esp->esp_event_cur;

	p = &esp->esp_event_log[idx];
	p->type = ESP_EVENT_TYPE_EVENT;
	p->val = val;
	esp_log_fill_regs(esp, p);

	esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);

	esp->event = val;
}

static void esp_dump_cmd_log(struct esp *esp)
{
	int idx = esp->esp_event_cur;
	int stop = idx;

	shost_printk(KERN_INFO, esp->host, "Dumping command log\n");
	do {
		struct esp_event_ent *p = &esp->esp_event_log[idx];

		shost_printk(KERN_INFO, esp->host,
			     "ent[%d] %s val[%02x] sreg[%02x] seqreg[%02x] "
			     "sreg2[%02x] ireg[%02x] ss[%02x] event[%02x]\n",
			     idx,
			     p->type == ESP_EVENT_TYPE_CMD ? "CMD" : "EVENT",
			     p->val, p->sreg, p->seqreg,
			     p->sreg2, p->ireg, p->select_state, p->event);

		idx = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
	} while (idx != stop);
}

static void esp_flush_fifo(struct esp *esp)
{
	scsi_esp_cmd(esp, ESP_CMD_FLUSH);
	if (esp->rev == ESP236) {
		int lim = 1000;

		while (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES) {
			if (--lim == 0) {
				shost_printk(KERN_ALERT, esp->host,
					     "ESP_FF_FBYTES will not clear!\n");
				break;
			}
			udelay(1);
		}
	}
}

static void hme_read_fifo(struct esp *esp)
{
	int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
	int idx = 0;

	while (fcnt--) {
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
	}
	if (esp->sreg2 & ESP_STAT2_F1BYTE) {
		esp_write8(0, ESP_FDATA);
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
	}
	esp->fifo_cnt = idx;
}
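
/* Sketch of the FIFO unloading above: on FASHME each unit of
 * ESP_FF_FBYTES appears to account for a 16-bit FIFO word, hence the
 * two reads per iteration, with ESP_STAT2_F1BYTE flagging a trailing
 * odd byte.  With fcnt == 2 and an odd byte pending:
 *
 *	fifo[0..3] <- two 16-bit FIFO words
 *	fifo[4]    <- odd byte, leaving fifo_cnt == 5
 */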

static void esp_set_all_config3(struct esp *esp, u8 val)
{
	int i;

	for (i = 0; i < ESP_MAX_TARGET; i++)
		esp->target[i].esp_config3 = val;
}

/* Reset the ESP chip, _not_ the SCSI bus. */
static void esp_reset_esp(struct esp *esp)
{
	u8 family_code, version;

	/* Now reset the ESP chip */
	scsi_esp_cmd(esp, ESP_CMD_RC);
	scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);
	if (esp->rev == FAST)
		esp_write8(ESP_CONFIG2_FENAB, ESP_CFG2);
	scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);

	/* This is the only point at which it is reliable to read
	 * the ID-code for the fast ESP chip variants.
	 */
	esp->max_period = ((35 * esp->ccycle) / 1000);
	if (esp->rev == FAST) {
		version = esp_read8(ESP_UID);
		family_code = (version & 0xf8) >> 3;
		if (family_code == 0x02)
			esp->rev = FAS236;
		else if (family_code == 0x0a)
			esp->rev = FASHME; /* Version is usually '5'. */
		else
			esp->rev = FAS100A;
		esp->min_period = ((4 * esp->ccycle) / 1000);
	} else {
		esp->min_period = ((5 * esp->ccycle) / 1000);
	}
	if (esp->rev == FAS236) {
		/*
		 * The AM53c974 chip returns the same ID as FAS236;
		 * try to configure glitch eater.
		 */
		u8 config4 = ESP_CONFIG4_GE1;
		esp_write8(config4, ESP_CFG4);
		config4 = esp_read8(ESP_CFG4);
		if (config4 & ESP_CONFIG4_GE1) {
			esp->rev = PCSCSI;
			esp_write8(esp->config4, ESP_CFG4);
		}
	}
	esp->max_period = (esp->max_period + 3) >> 2;
	esp->min_period = (esp->min_period + 3) >> 2;

	esp_write8(esp->config1, ESP_CFG1);
	switch (esp->rev) {
	case ESP100:
		/* nothing to do */
		break;

	case ESP100A:
		esp_write8(esp->config2, ESP_CFG2);
		break;

	case ESP236:
		/* Slow 236 */
		esp_write8(esp->config2, ESP_CFG2);
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		break;

	case FASHME:
		esp->config2 |= (ESP_CONFIG2_HME32 | ESP_CONFIG2_HMEFENAB);
		/* fallthrough... */

	case FAS236:
	case PCSCSI:
		/* Fast 236, AM53c974 or HME */
		esp_write8(esp->config2, ESP_CFG2);
		if (esp->rev == FASHME) {
			u8 cfg3 = esp->target[0].esp_config3;

			cfg3 |= ESP_CONFIG3_FCLOCK | ESP_CONFIG3_OBPUSH;
			if (esp->scsi_id >= 8)
				cfg3 |= ESP_CONFIG3_IDBIT3;
			esp_set_all_config3(esp, cfg3);
		} else {
			u32 cfg3 = esp->target[0].esp_config3;

			cfg3 |= ESP_CONFIG3_FCLK;
			esp_set_all_config3(esp, cfg3);
		}
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		if (esp->rev == FASHME) {
			esp->radelay = 80;
		} else {
			if (esp->flags & ESP_FLAG_DIFFERENTIAL)
				esp->radelay = 0;
			else
				esp->radelay = 96;
		}
		break;

	case FAS100A:
		/* Fast 100a */
		esp_write8(esp->config2, ESP_CFG2);
		esp_set_all_config3(esp,
				    (esp->target[0].esp_config3 |
				     ESP_CONFIG3_FCLOCK));
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		esp->radelay = 32;
		break;

	default:
		break;
	}

	/* Reload the configuration registers */
	esp_write8(esp->cfact, ESP_CFACT);

	esp->prev_stp = 0;
	esp_write8(esp->prev_stp, ESP_STP);

	esp->prev_soff = 0;
	esp_write8(esp->prev_soff, ESP_SOFF);

	esp_write8(esp->neg_defp, ESP_TIMEO);

	/* Eat any bitrot in the chip */
	esp_read8(ESP_INTRPT);
	udelay(100);
}
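
/* Worked example of the period bookkeeping above, assuming for
 * illustration a 40MHz clock (ccycle == 25000):
 *
 *	max_period = (35 * 25000) / 1000 = 875, then (875 + 3) >> 2 == 219
 *	min_period = (4 * 25000) / 1000 = 100, then (100 + 3) >> 2 == 25
 *
 * The 40MHz figure is only an assumption here; ccycle is filled in
 * elsewhere from the chip's actual clock configuration.
 */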

static void esp_map_dma(struct esp *esp, struct scsi_cmnd *cmd)
{
	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
	struct scatterlist *sg = scsi_sglist(cmd);
	int dir = cmd->sc_data_direction;
	int total, i;

	if (dir == DMA_NONE)
		return;

	spriv->u.num_sg = esp->ops->map_sg(esp, sg, scsi_sg_count(cmd), dir);
	spriv->cur_residue = sg_dma_len(sg);
	spriv->cur_sg = sg;

	total = 0;
	for (i = 0; i < spriv->u.num_sg; i++)
		total += sg_dma_len(&sg[i]);
	spriv->tot_residue = total;
}

static dma_addr_t esp_cur_dma_addr(struct esp_cmd_entry *ent,
				   struct scsi_cmnd *cmd)
{
	struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		return ent->sense_dma +
			(ent->sense_ptr - cmd->sense_buffer);
	}

	return sg_dma_address(p->cur_sg) +
		(sg_dma_len(p->cur_sg) -
		 p->cur_residue);
}

static unsigned int esp_cur_dma_len(struct esp_cmd_entry *ent,
				    struct scsi_cmnd *cmd)
{
	struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		return SCSI_SENSE_BUFFERSIZE -
			(ent->sense_ptr - cmd->sense_buffer);
	}
	return p->cur_residue;
}

static void esp_advance_dma(struct esp *esp, struct esp_cmd_entry *ent,
			    struct scsi_cmnd *cmd, unsigned int len)
{
	struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		ent->sense_ptr += len;
		return;
	}

	p->cur_residue -= len;
	p->tot_residue -= len;
	if (p->cur_residue < 0 || p->tot_residue < 0) {
		shost_printk(KERN_ERR, esp->host,
			     "Data transfer overflow.\n");
		shost_printk(KERN_ERR, esp->host,
			     "cur_residue[%d] tot_residue[%d] len[%u]\n",
			     p->cur_residue, p->tot_residue, len);
		p->cur_residue = 0;
		p->tot_residue = 0;
	}
	if (!p->cur_residue && p->tot_residue) {
		p->cur_sg++;
		p->cur_residue = sg_dma_len(p->cur_sg);
	}
}

static void esp_unmap_dma(struct esp *esp, struct scsi_cmnd *cmd)
{
	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
	int dir = cmd->sc_data_direction;

	if (dir == DMA_NONE)
		return;

	esp->ops->unmap_sg(esp, scsi_sglist(cmd), spriv->u.num_sg, dir);
}

static void esp_save_pointers(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;
	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		ent->saved_sense_ptr = ent->sense_ptr;
		return;
	}
	ent->saved_cur_residue = spriv->cur_residue;
	ent->saved_cur_sg = spriv->cur_sg;
	ent->saved_tot_residue = spriv->tot_residue;
}

static void esp_restore_pointers(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;
	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		ent->sense_ptr = ent->saved_sense_ptr;
		return;
	}
	spriv->cur_residue = ent->saved_cur_residue;
	spriv->cur_sg = ent->saved_cur_sg;
	spriv->tot_residue = ent->saved_tot_residue;
}

static void esp_check_command_len(struct esp *esp, struct scsi_cmnd *cmd)
{
	if (cmd->cmd_len == 6 ||
	    cmd->cmd_len == 10 ||
	    cmd->cmd_len == 12) {
		esp->flags &= ~ESP_FLAG_DOING_SLOWCMD;
	} else {
		esp->flags |= ESP_FLAG_DOING_SLOWCMD;
	}
}

static void esp_write_tgt_config3(struct esp *esp, int tgt)
{
	if (esp->rev > ESP100A) {
		u8 val = esp->target[tgt].esp_config3;

		if (val != esp->prev_cfg3) {
			esp->prev_cfg3 = val;
			esp_write8(val, ESP_CFG3);
		}
	}
}

static void esp_write_tgt_sync(struct esp *esp, int tgt)
{
	u8 off = esp->target[tgt].esp_offset;
	u8 per = esp->target[tgt].esp_period;

	if (off != esp->prev_soff) {
		esp->prev_soff = off;
		esp_write8(off, ESP_SOFF);
	}
	if (per != esp->prev_stp) {
		esp->prev_stp = per;
		esp_write8(per, ESP_STP);
	}
}

static u32 esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len)
{
	if (esp->rev == FASHME) {
		/* Arbitrary segment boundaries, 24-bit counts.  */
		if (dma_len > (1U << 24))
			dma_len = (1U << 24);
	} else {
		u32 base, end;

		/* The ESP chip limits the other variants to a 16-bit
		 * transfer count.  Actually on FAS100A and FAS236 we
		 * could get 24 bits of transfer count by enabling
		 * ESP_CONFIG2_FENAB in the ESP_CFG2 register, but that
		 * causes other unwanted changes so we don't use it
		 * currently.
		 */
		if (dma_len > (1U << 16))
			dma_len = (1U << 16);

		/* All of the DMA variants hooked up to these chips
		 * cannot handle crossing a 24-bit address boundary.
		 */
		base = dma_addr & ((1U << 24) - 1U);
		end = base + dma_len;
		if (end > (1U << 24))
			end = (1U << 24);
		dma_len = end - base;
	}
	return dma_len;
}
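
/* Worked example for the non-FASHME branch above: a 64KB transfer at
 * dma_addr 0x00ffc000 gives base == 0xffc000 and end == 0x100c000,
 * which is clipped to 0x1000000, so dma_len is limited to 0x4000 and
 * the rest of the buffer is moved in a later burst that starts past
 * the 16MB boundary.
 */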

static int esp_need_to_nego_wide(struct esp_target_data *tp)
{
	struct scsi_target *target = tp->starget;

	return spi_width(target) != tp->nego_goal_width;
}

static int esp_need_to_nego_sync(struct esp_target_data *tp)
{
	struct scsi_target *target = tp->starget;

	/* When offset is zero, period is "don't care".  */
	if (!spi_offset(target) && !tp->nego_goal_offset)
		return 0;

	if (spi_offset(target) == tp->nego_goal_offset &&
	    spi_period(target) == tp->nego_goal_period)
		return 0;

	return 1;
}

static int esp_alloc_lun_tag(struct esp_cmd_entry *ent,
			     struct esp_lun_data *lp)
{
	if (!ent->orig_tag[0]) {
		/* Non-tagged, slot already taken?  */
		if (lp->non_tagged_cmd)
			return -EBUSY;

		if (lp->hold) {
			/* We are being held by active tagged
			 * commands.
			 */
			if (lp->num_tagged)
				return -EBUSY;

			/* Tagged commands completed, we can unplug
			 * the queue and run this untagged command.
			 */
			lp->hold = 0;
		} else if (lp->num_tagged) {
			/* Plug the queue until num_tagged decreases
			 * to zero in esp_free_lun_tag.
			 */
			lp->hold = 1;
			return -EBUSY;
		}

		lp->non_tagged_cmd = ent;
		return 0;
	} else {
		/* Tagged command, see if blocked by a
		 * non-tagged one.
		 */
		if (lp->non_tagged_cmd || lp->hold)
			return -EBUSY;
	}

	BUG_ON(lp->tagged_cmds[ent->orig_tag[1]]);

	lp->tagged_cmds[ent->orig_tag[1]] = ent;
	lp->num_tagged++;

	return 0;
}

static void esp_free_lun_tag(struct esp_cmd_entry *ent,
			     struct esp_lun_data *lp)
{
	if (ent->orig_tag[0]) {
		BUG_ON(lp->tagged_cmds[ent->orig_tag[1]] != ent);
		lp->tagged_cmds[ent->orig_tag[1]] = NULL;
		lp->num_tagged--;
	} else {
		BUG_ON(lp->non_tagged_cmd != ent);
		lp->non_tagged_cmd = NULL;
	}
}
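
/* Queue plugging in brief: an untagged command arriving while tagged
 * commands are active sets lp->hold and keeps being deferred; once
 * esp_free_lun_tag() has dropped num_tagged to zero, the next
 * esp_alloc_lun_tag() call clears lp->hold and lets the untagged
 * command claim the LUN exclusively.
 */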

/* When a contingent allegiance condition is created, we force feed a
 * REQUEST_SENSE command to the device to fetch the sense data.  I
 * tried many other schemes, relying on the scsi error handling layer
 * to send out the REQUEST_SENSE automatically, but this was difficult
 * to get right especially in the presence of applications like smartd
 * which use SG_IO to send out their own REQUEST_SENSE commands.
 */
static void esp_autosense(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;
	struct scsi_device *dev = cmd->device;
	int tgt, lun;
	u8 *p, val;

	tgt = dev->id;
	lun = dev->lun;
	if (!ent->sense_ptr) {
		esp_log_autosense("Doing auto-sense for tgt[%d] lun[%d]\n",
				  tgt, lun);

		ent->sense_ptr = cmd->sense_buffer;
		ent->sense_dma = esp->ops->map_single(esp,
						      ent->sense_ptr,
						      SCSI_SENSE_BUFFERSIZE,
						      DMA_FROM_DEVICE);
	}
	ent->saved_sense_ptr = ent->sense_ptr;

	esp->active_cmd = ent;

	p = esp->command_block;
	esp->msg_out_len = 0;

	*p++ = IDENTIFY(0, lun);
	*p++ = REQUEST_SENSE;
	*p++ = ((dev->scsi_level <= SCSI_2) ?
		(lun << 5) : 0);
	*p++ = 0;
	*p++ = 0;
	*p++ = SCSI_SENSE_BUFFERSIZE;
	*p++ = 0;

	esp->select_state = ESP_SELECT_BASIC;

	val = tgt;
	if (esp->rev == FASHME)
		val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT;
	esp_write8(val, ESP_BUSID);

	esp_write_tgt_sync(esp, tgt);
	esp_write_tgt_config3(esp, tgt);

	val = (p - esp->command_block);

	esp_send_dma_cmd(esp, val, 16, ESP_CMD_SELA);
}
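
/* The command block built above is an IDENTIFY byte followed by a
 * six-byte REQUEST_SENSE CDB.  For lun 0 on a SCSI-2 device it would
 * look like:
 *
 *	0x80	IDENTIFY(0, 0)
 *	0x03	REQUEST_SENSE opcode
 *	0x00	lun << 5 (pre-SCSI-3 LUN-in-CDB convention)
 *	0x00	reserved
 *	0x00	reserved
 *	SCSI_SENSE_BUFFERSIZE	allocation length
 *	0x00	control
 */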

static struct esp_cmd_entry *find_and_prep_issuable_command(struct esp *esp)
{
	struct esp_cmd_entry *ent;

	list_for_each_entry(ent, &esp->queued_cmds, list) {
		struct scsi_cmnd *cmd = ent->cmd;
		struct scsi_device *dev = cmd->device;
		struct esp_lun_data *lp = dev->hostdata;

		if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
			ent->tag[0] = 0;
			ent->tag[1] = 0;
			return ent;
		}

		if (!spi_populate_tag_msg(&ent->tag[0], cmd)) {
			ent->tag[0] = 0;
			ent->tag[1] = 0;
		}
		ent->orig_tag[0] = ent->tag[0];
		ent->orig_tag[1] = ent->tag[1];

		if (esp_alloc_lun_tag(ent, lp) < 0)
			continue;

		return ent;
	}

	return NULL;
}

static void esp_maybe_execute_command(struct esp *esp)
{
	struct esp_target_data *tp;
	struct esp_lun_data *lp;
	struct scsi_device *dev;
	struct scsi_cmnd *cmd;
	struct esp_cmd_entry *ent;
	int tgt, lun, i;
	u32 val, start_cmd;
	u8 *p;

	if (esp->active_cmd ||
	    (esp->flags & ESP_FLAG_RESETTING))
		return;

	ent = find_and_prep_issuable_command(esp);
	if (!ent)
		return;

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		esp_autosense(esp, ent);
		return;
	}

	cmd = ent->cmd;
	dev = cmd->device;
	tgt = dev->id;
	lun = dev->lun;
	tp = &esp->target[tgt];
	lp = dev->hostdata;

	list_move(&ent->list, &esp->active_cmds);

	esp->active_cmd = ent;

	esp_map_dma(esp, cmd);
	esp_save_pointers(esp, ent);

	esp_check_command_len(esp, cmd);

	p = esp->command_block;

	esp->msg_out_len = 0;
	if (tp->flags & ESP_TGT_CHECK_NEGO) {
		/* Need to negotiate.  If the target is broken
		 * go for synchronous transfers and non-wide.
		 */
		if (tp->flags & ESP_TGT_BROKEN) {
			tp->flags &= ~ESP_TGT_DISCONNECT;
			tp->nego_goal_period = 0;
			tp->nego_goal_offset = 0;
			tp->nego_goal_width = 0;
			tp->nego_goal_tags = 0;
		}

		/* If the settings are not changing, skip this.  */
		if (spi_width(tp->starget) == tp->nego_goal_width &&
		    spi_period(tp->starget) == tp->nego_goal_period &&
		    spi_offset(tp->starget) == tp->nego_goal_offset) {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
			goto build_identify;
		}

		if (esp->rev == FASHME && esp_need_to_nego_wide(tp)) {
			esp->msg_out_len =
				spi_populate_width_msg(&esp->msg_out[0],
						       (tp->nego_goal_width ?
							1 : 0));
			tp->flags |= ESP_TGT_NEGO_WIDE;
		} else if (esp_need_to_nego_sync(tp)) {
			esp->msg_out_len =
				spi_populate_sync_msg(&esp->msg_out[0],
						      tp->nego_goal_period,
						      tp->nego_goal_offset);
			tp->flags |= ESP_TGT_NEGO_SYNC;
		} else {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
		}

		/* Process it like a slow command.  */
		if (tp->flags & (ESP_TGT_NEGO_WIDE | ESP_TGT_NEGO_SYNC))
			esp->flags |= ESP_FLAG_DOING_SLOWCMD;
	}

build_identify:
	/* If we don't have a lun-data struct yet, we're probing
	 * so do not disconnect.  Also, do not disconnect unless
	 * we have a tag on this command.
	 */
	if (lp && (tp->flags & ESP_TGT_DISCONNECT) && ent->tag[0])
		*p++ = IDENTIFY(1, lun);
	else
		*p++ = IDENTIFY(0, lun);

	if (ent->tag[0] && esp->rev == ESP100) {
		/* ESP100 lacks select w/atn3 command, use select
		 * and stop instead.
		 */
		esp->flags |= ESP_FLAG_DOING_SLOWCMD;
	}

	if (!(esp->flags & ESP_FLAG_DOING_SLOWCMD)) {
		start_cmd = ESP_CMD_SELA;
		if (ent->tag[0]) {
			*p++ = ent->tag[0];
			*p++ = ent->tag[1];

			start_cmd = ESP_CMD_SA3;
		}

		for (i = 0; i < cmd->cmd_len; i++)
			*p++ = cmd->cmnd[i];

		esp->select_state = ESP_SELECT_BASIC;
	} else {
		esp->cmd_bytes_left = cmd->cmd_len;
		esp->cmd_bytes_ptr = &cmd->cmnd[0];

		if (ent->tag[0]) {
			for (i = esp->msg_out_len - 1;
			     i >= 0; i--)
				esp->msg_out[i + 2] = esp->msg_out[i];
			esp->msg_out[0] = ent->tag[0];
			esp->msg_out[1] = ent->tag[1];
			esp->msg_out_len += 2;
		}

		start_cmd = ESP_CMD_SELAS;
		esp->select_state = ESP_SELECT_MSGOUT;
	}
	val = tgt;
	if (esp->rev == FASHME)
		val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT;
	esp_write8(val, ESP_BUSID);

	esp_write_tgt_sync(esp, tgt);
	esp_write_tgt_config3(esp, tgt);

	val = (p - esp->command_block);

	if (esp_debug & ESP_DEBUG_SCSICMD) {
		printk("ESP: tgt[%d] lun[%d] scsi_cmd [ ", tgt, lun);
		for (i = 0; i < cmd->cmd_len; i++)
			printk("%02x ", cmd->cmnd[i]);
		printk("]\n");
	}

	esp_send_dma_cmd(esp, val, 16, start_cmd);
}

static struct esp_cmd_entry *esp_get_ent(struct esp *esp)
{
	struct list_head *head = &esp->esp_cmd_pool;
	struct esp_cmd_entry *ret;

	if (list_empty(head)) {
		ret = kzalloc(sizeof(struct esp_cmd_entry), GFP_ATOMIC);
	} else {
		ret = list_entry(head->next, struct esp_cmd_entry, list);
		list_del(&ret->list);
		memset(ret, 0, sizeof(*ret));
	}
	return ret;
}

static void esp_put_ent(struct esp *esp, struct esp_cmd_entry *ent)
{
	list_add(&ent->list, &esp->esp_cmd_pool);
}

static void esp_cmd_is_done(struct esp *esp, struct esp_cmd_entry *ent,
			    struct scsi_cmnd *cmd, unsigned int result)
{
	struct scsi_device *dev = cmd->device;
	int tgt = dev->id;
	int lun = dev->lun;

	esp->active_cmd = NULL;
	esp_unmap_dma(esp, cmd);
	esp_free_lun_tag(ent, dev->hostdata);
	cmd->result = result;

	if (ent->eh_done) {
		complete(ent->eh_done);
		ent->eh_done = NULL;
	}

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		esp->ops->unmap_single(esp, ent->sense_dma,
				       SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
		ent->sense_ptr = NULL;

		/* Restore the message/status bytes to what we actually
		 * saw originally.  Also, report that we are providing
		 * the sense data.
		 */
		cmd->result = ((DRIVER_SENSE << 24) |
			       (DID_OK << 16) |
			       (COMMAND_COMPLETE << 8) |
			       (SAM_STAT_CHECK_CONDITION << 0));

		ent->flags &= ~ESP_CMD_FLAG_AUTOSENSE;
		if (esp_debug & ESP_DEBUG_AUTOSENSE) {
			int i;

			printk("esp%d: tgt[%d] lun[%d] AUTO SENSE[ ",
			       esp->host->unique_id, tgt, lun);
			for (i = 0; i < 18; i++)
				printk("%02x ", cmd->sense_buffer[i]);
			printk("]\n");
		}
	}

	cmd->scsi_done(cmd);

	list_del(&ent->list);
	esp_put_ent(esp, ent);

	esp_maybe_execute_command(esp);
}

static unsigned int compose_result(unsigned int status, unsigned int message,
				   unsigned int driver_code)
{
	return (status | (message << 8) | (driver_code << 16));
}
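
/* Example: a command that finished with CHECK CONDITION after a normal
 * COMMAND COMPLETE message under DID_OK encodes as
 * compose_result(SAM_STAT_CHECK_CONDITION, COMMAND_COMPLETE, DID_OK)
 * == 0x00000002, matching the status/message/host-byte layout of
 * scsi_cmnd->result.
 */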

static void esp_event_queue_full(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_device *dev = ent->cmd->device;
	struct esp_lun_data *lp = dev->hostdata;

	scsi_track_queue_full(dev, lp->num_tagged - 1);
}

static int esp_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
	struct scsi_device *dev = cmd->device;
	struct esp *esp = shost_priv(dev->host);
	struct esp_cmd_priv *spriv;
	struct esp_cmd_entry *ent;

	ent = esp_get_ent(esp);
	if (!ent)
		return SCSI_MLQUEUE_HOST_BUSY;

	ent->cmd = cmd;

	cmd->scsi_done = done;

	spriv = ESP_CMD_PRIV(cmd);
	spriv->u.dma_addr = ~(dma_addr_t)0x0;

	list_add_tail(&ent->list, &esp->queued_cmds);

	esp_maybe_execute_command(esp);

	return 0;
}

static DEF_SCSI_QCMD(esp_queuecommand)

static int esp_check_gross_error(struct esp *esp)
{
	if (esp->sreg & ESP_STAT_SPAM) {
		/* Gross Error, could be one of:
		 * - top of fifo overwritten
		 * - top of command register overwritten
		 * - DMA programmed with wrong direction
		 * - improper phase change
		 */
		shost_printk(KERN_ERR, esp->host,
			     "Gross error sreg[%02x]\n", esp->sreg);
		/* XXX Reset the chip. XXX */
		return 1;
	}
	return 0;
}

static int esp_check_spur_intr(struct esp *esp)
{
	switch (esp->rev) {
	case ESP100:
	case ESP100A:
		/* The interrupt pending bit of the status register cannot
		 * be trusted on these revisions.
		 */
		esp->sreg &= ~ESP_STAT_INTR;
		break;

	default:
		if (!(esp->sreg & ESP_STAT_INTR)) {
			if (esp->ireg & ESP_INTR_SR)
				return 1;

			/* If the DMA is indicating interrupt pending and the
			 * ESP is not, the only possibility is a DMA error.
			 */
			if (!esp->ops->dma_error(esp)) {
				shost_printk(KERN_ERR, esp->host,
					     "Spurious irq, sreg=%02x.\n",
					     esp->sreg);
				return -1;
			}

			shost_printk(KERN_ERR, esp->host, "DMA error\n");

			/* XXX Reset the chip. XXX */
			return -1;
		}
		break;
	}

	return 0;
}

static void esp_schedule_reset(struct esp *esp)
{
	esp_log_reset("esp_schedule_reset() from %pf\n",
		      __builtin_return_address(0));
	esp->flags |= ESP_FLAG_RESETTING;
	esp_event(esp, ESP_EVENT_RESET);
}

/* In order to avoid having to add a special half-reconnected state
 * into the driver we just sit here and poll through the rest of
 * the reselection process to get the tag message bytes.
 */
static struct esp_cmd_entry *esp_reconnect_with_tag(struct esp *esp,
						    struct esp_lun_data *lp)
{
	struct esp_cmd_entry *ent;
	int i;

	if (!lp->num_tagged) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect w/num_tagged==0\n");
		return NULL;
	}

	esp_log_reconnect("reconnect tag, ");

	for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) {
		if (esp->ops->irq_pending(esp))
			break;
	}
	if (i == ESP_QUICKIRQ_LIMIT) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect IRQ1 timeout\n");
		return NULL;
	}

	esp->sreg = esp_read8(ESP_STATUS);
	esp->ireg = esp_read8(ESP_INTRPT);

	esp_log_reconnect("IRQ(%d:%x:%x), ",
			  i, esp->ireg, esp->sreg);

	if (esp->ireg & ESP_INTR_DC) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, got disconnect.\n");
		return NULL;
	}

	if ((esp->sreg & ESP_STAT_PMASK) != ESP_MIP) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, not MIP sreg[%02x].\n", esp->sreg);
		return NULL;
	}

	/* DMA in the tag bytes... */
	esp->command_block[0] = 0xff;
	esp->command_block[1] = 0xff;
	esp->ops->send_dma_cmd(esp, esp->command_block_dma,
			       2, 2, 1, ESP_CMD_DMA | ESP_CMD_TI);

	/* ACK the message.  */
	scsi_esp_cmd(esp, ESP_CMD_MOK);

	for (i = 0; i < ESP_RESELECT_TAG_LIMIT; i++) {
		if (esp->ops->irq_pending(esp)) {
			esp->sreg = esp_read8(ESP_STATUS);
			esp->ireg = esp_read8(ESP_INTRPT);
			if (esp->ireg & ESP_INTR_FDONE)
				break;
		}
		udelay(1);
	}
	if (i == ESP_RESELECT_TAG_LIMIT) {
		shost_printk(KERN_ERR, esp->host, "Reconnect IRQ2 timeout\n");
		return NULL;
	}
	esp->ops->dma_drain(esp);
	esp->ops->dma_invalidate(esp);

	esp_log_reconnect("IRQ2(%d:%x:%x) tag[%x:%x]\n",
			  i, esp->ireg, esp->sreg,
			  esp->command_block[0],
			  esp->command_block[1]);

	if (esp->command_block[0] < SIMPLE_QUEUE_TAG ||
	    esp->command_block[0] > ORDERED_QUEUE_TAG) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, bad tag type %02x.\n",
			     esp->command_block[0]);
		return NULL;
	}

	ent = lp->tagged_cmds[esp->command_block[1]];
	if (!ent) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, no entry for tag %02x.\n",
			     esp->command_block[1]);
		return NULL;
	}

	return ent;
}

static int esp_reconnect(struct esp *esp)
{
	struct esp_cmd_entry *ent;
	struct esp_target_data *tp;
	struct esp_lun_data *lp;
	struct scsi_device *dev;
	int target, lun;

	BUG_ON(esp->active_cmd);
	if (esp->rev == FASHME) {
		/* FASHME puts the target and lun numbers directly
		 * into the fifo.
		 */
		target = esp->fifo[0];
		lun = esp->fifo[1] & 0x7;
	} else {
		u8 bits = esp_read8(ESP_FDATA);

		/* Older chips put the lun directly into the fifo, but
		 * the target is given as a sample of the arbitration
		 * lines on the bus at reselection time.  So we should
		 * see the ID of the ESP and the one reconnecting target
		 * set in the bitmap.
		 */
		if (!(bits & esp->scsi_id_mask))
			goto do_reset;
		bits &= ~esp->scsi_id_mask;
		if (!bits || (bits & (bits - 1)))
			goto do_reset;

		target = ffs(bits) - 1;
		lun = (esp_read8(ESP_FDATA) & 0x7);

		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		if (esp->rev == ESP100) {
			u8 ireg = esp_read8(ESP_INTRPT);
			/* This chip has a bug during reselection that can
			 * cause a spurious illegal-command interrupt, which
			 * we simply ACK here.  Another possibility is a bus
			 * reset so we must check for that.
			 */
			if (ireg & ESP_INTR_SR)
				goto do_reset;
		}
		scsi_esp_cmd(esp, ESP_CMD_NULL);
	}

	esp_write_tgt_sync(esp, target);
	esp_write_tgt_config3(esp, target);

	scsi_esp_cmd(esp, ESP_CMD_MOK);

	if (esp->rev == FASHME)
		esp_write8(target | ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT,
			   ESP_BUSID);

	tp = &esp->target[target];
	dev = __scsi_device_lookup_by_target(tp->starget, lun);
	if (!dev) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, no lp tgt[%u] lun[%u]\n",
			     target, lun);
		goto do_reset;
	}
	lp = dev->hostdata;

	ent = lp->non_tagged_cmd;
	if (!ent) {
		ent = esp_reconnect_with_tag(esp, lp);
		if (!ent)
			goto do_reset;
	}

	esp->active_cmd = ent;

	if (ent->flags & ESP_CMD_FLAG_ABORT) {
		esp->msg_out[0] = ABORT_TASK_SET;
		esp->msg_out_len = 1;
		scsi_esp_cmd(esp, ESP_CMD_SATN);
	}

	esp_event(esp, ESP_EVENT_CHECK_PHASE);
	esp_restore_pointers(esp, ent);
	esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
	return 1;

do_reset:
	esp_schedule_reset(esp);
	return 0;
}

static int esp_finish_select(struct esp *esp)
{
	struct esp_cmd_entry *ent;
	struct scsi_cmnd *cmd;
	u8 orig_select_state;

	orig_select_state = esp->select_state;

	/* No longer selecting.  */
	esp->select_state = ESP_SELECT_NONE;

	esp->seqreg = esp_read8(ESP_SSTEP) & ESP_STEP_VBITS;
	ent = esp->active_cmd;
	cmd = ent->cmd;

	if (esp->ops->dma_error(esp)) {
		/* If we see a DMA error during or as a result of selection,
		 * all bets are off.
		 */
		esp_schedule_reset(esp);
		esp_cmd_is_done(esp, ent, cmd, (DID_ERROR << 16));
		return 0;
	}

	esp->ops->dma_invalidate(esp);

	if (esp->ireg == (ESP_INTR_RSEL | ESP_INTR_FDONE)) {
		struct esp_target_data *tp = &esp->target[cmd->device->id];

		/* Carefully back out of the selection attempt.  Release
		 * resources (such as DMA mapping & TAG) and reset state (such
		 * as message out and command delivery variables).
		 */
		if (!(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) {
			esp_unmap_dma(esp, cmd);
			esp_free_lun_tag(ent, cmd->device->hostdata);
			tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_NEGO_WIDE);
			esp->flags &= ~ESP_FLAG_DOING_SLOWCMD;
			esp->cmd_bytes_ptr = NULL;
			esp->cmd_bytes_left = 0;
		} else {
			esp->ops->unmap_single(esp, ent->sense_dma,
					       SCSI_SENSE_BUFFERSIZE,
					       DMA_FROM_DEVICE);
			ent->sense_ptr = NULL;
		}

		/* Now that the state is unwound properly, put back onto
		 * the issue queue.  This command is no longer active.
		 */
		list_move(&ent->list, &esp->queued_cmds);
		esp->active_cmd = NULL;

		/* Return value ignored by caller, it directly invokes
		 * esp_reconnect().
		 */
		return 0;
	}

	if (esp->ireg == ESP_INTR_DC) {
		struct scsi_device *dev = cmd->device;

		/* Disconnect.  Make sure we re-negotiate sync and
		 * wide parameters if this target starts responding
		 * again in the future.
		 */
		esp->target[dev->id].flags |= ESP_TGT_CHECK_NEGO;

		scsi_esp_cmd(esp, ESP_CMD_ESEL);
		esp_cmd_is_done(esp, ent, cmd, (DID_BAD_TARGET << 16));
		return 1;
	}

	if (esp->ireg == (ESP_INTR_FDONE | ESP_INTR_BSERV)) {
		/* Selection successful.  On pre-FAST chips we have
		 * to do a NOP and possibly clean out the FIFO.
		 */
		if (esp->rev <= ESP236) {
			int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;

			scsi_esp_cmd(esp, ESP_CMD_NULL);

			if (!fcnt &&
			    (!esp->prev_soff ||
			     ((esp->sreg & ESP_STAT_PMASK) != ESP_DIP)))
				esp_flush_fifo(esp);
		}

		/* If we are doing a slow command, negotiation, etc.
		 * we'll do the right thing as we transition to the
		 * next phase.
		 */
		esp_event(esp, ESP_EVENT_CHECK_PHASE);
		return 0;
	}

	shost_printk(KERN_INFO, esp->host,
		     "Unexpected selection completion ireg[%x]\n", esp->ireg);
	esp_schedule_reset(esp);
	return 0;
}

static int esp_data_bytes_sent(struct esp *esp, struct esp_cmd_entry *ent,
			       struct scsi_cmnd *cmd)
{
	int fifo_cnt, ecount, bytes_sent, flush_fifo;

	fifo_cnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
	if (esp->prev_cfg3 & ESP_CONFIG3_EWIDE)
		fifo_cnt <<= 1;

	ecount = 0;
	if (!(esp->sreg & ESP_STAT_TCNT)) {
		ecount = ((unsigned int)esp_read8(ESP_TCLOW) |
			  (((unsigned int)esp_read8(ESP_TCMED)) << 8));
		if (esp->rev == FASHME)
			ecount |= ((unsigned int)esp_read8(FAS_RLO)) << 16;
		if (esp->rev == PCSCSI && (esp->config2 & ESP_CONFIG2_FENAB))
			ecount |= ((unsigned int)esp_read8(ESP_TCHI)) << 16;
	}

	bytes_sent = esp->data_dma_len;
	bytes_sent -= ecount;

	/*
	 * The am53c974 has a DMA 'peculiarity'.  The doc states:
	 * In some odd byte conditions, one residual byte will
	 * be left in the SCSI FIFO, and the FIFO Flags will
	 * never count to '0'.  When this happens, the residual
	 * byte should be retrieved via PIO following completion
	 * of the BLAST operation.
	 */
	if (fifo_cnt == 1 && ent->flags & ESP_CMD_FLAG_RESIDUAL) {
		size_t count = 1;
		size_t offset = bytes_sent;
		u8 bval = esp_read8(ESP_FDATA);

		if (ent->flags & ESP_CMD_FLAG_AUTOSENSE)
			ent->sense_ptr[bytes_sent] = bval;
		else {
			struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
			u8 *ptr;

			ptr = scsi_kmap_atomic_sg(p->cur_sg, p->u.num_sg,
						  &offset, &count);
			if (likely(ptr)) {
				*(ptr + offset) = bval;
				scsi_kunmap_atomic_sg(ptr);
			}
		}
		bytes_sent += fifo_cnt;
		ent->flags &= ~ESP_CMD_FLAG_RESIDUAL;
	}
	if (!(ent->flags & ESP_CMD_FLAG_WRITE))
		bytes_sent -= fifo_cnt;

	flush_fifo = 0;
	if (!esp->prev_soff) {
		/* Asynchronous data transfer, always flush fifo. */
		flush_fifo = 1;
	} else {
		if (esp->rev == ESP100) {
			u32 fflags, phase;

			/* ESP100 has a chip bug where in the synchronous data
			 * phase it can mistake a final long REQ pulse from the
			 * target as an extra data byte.  Fun.
			 *
			 * To detect this case we resample the status register
			 * and fifo flags.  If we're still in a data phase and
			 * we see spurious chunks in the fifo, we return error
			 * to the caller which should reset and set things up
			 * such that we only try future transfers to this
			 * target in synchronous mode.
			 */
			esp->sreg = esp_read8(ESP_STATUS);
			phase = esp->sreg & ESP_STAT_PMASK;
			fflags = esp_read8(ESP_FFLAGS);

			if ((phase == ESP_DOP &&
			     (fflags & ESP_FF_ONOTZERO)) ||
			    (phase == ESP_DIP &&
			     (fflags & ESP_FF_FBYTES)))
				return -1;
		}
		if (!(ent->flags & ESP_CMD_FLAG_WRITE))
			flush_fifo = 1;
	}

	if (flush_fifo)
		esp_flush_fifo(esp);

	return bytes_sent;
}
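
/* Residual arithmetic sketch for the function above: a data-out
 * transfer with data_dma_len == 512 that stops with ecount == 32 and
 * fifo_cnt == 4 yields bytes_sent == 512 - 32 - 4 == 476, since bytes
 * still in the transfer counter or sitting in the FIFO never reached
 * the target.
 */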

static void esp_setsync(struct esp *esp, struct esp_target_data *tp,
			u8 scsi_period, u8 scsi_offset,
			u8 esp_stp, u8 esp_soff)
{
	spi_period(tp->starget) = scsi_period;
	spi_offset(tp->starget) = scsi_offset;
	spi_width(tp->starget) = (tp->flags & ESP_TGT_WIDE) ? 1 : 0;

	if (esp_soff) {
		esp_stp &= 0x1f;
		esp_soff |= esp->radelay;
		if (esp->rev >= FAS236) {
			u8 bit = ESP_CONFIG3_FSCSI;
			if (esp->rev >= FAS100A)
				bit = ESP_CONFIG3_FAST;

			if (scsi_period < 50) {
				if (esp->rev == FASHME)
					esp_soff &= ~esp->radelay;
				tp->esp_config3 |= bit;
			} else {
				tp->esp_config3 &= ~bit;
			}
			esp->prev_cfg3 = tp->esp_config3;
			esp_write8(esp->prev_cfg3, ESP_CFG3);
		}
	}

	tp->esp_period = esp->prev_stp = esp_stp;
	tp->esp_offset = esp->prev_soff = esp_soff;

	esp_write8(esp_soff, ESP_SOFF);
	esp_write8(esp_stp, ESP_STP);

	tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO);

	spi_display_xfer_agreement(tp->starget);
}

static void esp_msgin_reject(struct esp *esp)
{
	struct esp_cmd_entry *ent = esp->active_cmd;
	struct scsi_cmnd *cmd = ent->cmd;
	struct esp_target_data *tp;
	int tgt;

	tgt = cmd->device->id;
	tp = &esp->target[tgt];

	if (tp->flags & ESP_TGT_NEGO_WIDE) {
		tp->flags &= ~(ESP_TGT_NEGO_WIDE | ESP_TGT_WIDE);

		if (!esp_need_to_nego_sync(tp)) {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
			scsi_esp_cmd(esp, ESP_CMD_RATN);
		} else {
			esp->msg_out_len =
				spi_populate_sync_msg(&esp->msg_out[0],
						      tp->nego_goal_period,
						      tp->nego_goal_offset);
			tp->flags |= ESP_TGT_NEGO_SYNC;
			scsi_esp_cmd(esp, ESP_CMD_SATN);
		}
		return;
	}

	if (tp->flags & ESP_TGT_NEGO_SYNC) {
		tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO);
		tp->esp_period = 0;
		tp->esp_offset = 0;
		esp_setsync(esp, tp, 0, 0, 0, 0);
		scsi_esp_cmd(esp, ESP_CMD_RATN);
		return;
	}

	esp->msg_out[0] = ABORT_TASK_SET;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}

static void esp_msgin_sdtr(struct esp *esp, struct esp_target_data *tp)
{
	u8 period = esp->msg_in[3];
	u8 offset = esp->msg_in[4];
	u8 stp;

	if (!(tp->flags & ESP_TGT_NEGO_SYNC))
		goto do_reject;

	if (offset > 15)
		goto do_reject;

	if (offset) {
		int one_clock;

		if (period > esp->max_period) {
			period = offset = 0;
			goto do_sdtr;
		}
		if (period < esp->min_period)
			goto do_reject;

		one_clock = esp->ccycle / 1000;
		stp = DIV_ROUND_UP(period << 2, one_clock);
		if (stp && esp->rev >= FAS236) {
			if (stp >= 50)
				stp--;
		}
	} else {
		stp = 0;
	}

	esp_setsync(esp, tp, period, offset, stp, offset);
	return;

do_reject:
	esp->msg_out[0] = MESSAGE_REJECT;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
	return;

do_sdtr:
	tp->nego_goal_period = period;
	tp->nego_goal_offset = offset;
	esp->msg_out_len =
		spi_populate_sync_msg(&esp->msg_out[0],
				      tp->nego_goal_period,
				      tp->nego_goal_offset);
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}
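
/* Sketch of the conversion above, assuming a 40MHz clock so that
 * one_clock == 25 (ns): an SDTR period byte of 25 means 100ns, giving
 * stp = DIV_ROUND_UP(25 << 2, 25) == 4 clocks per byte; the FAS236+
 * adjustment leaves it alone since stp < 50.
 */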

static void esp_msgin_wdtr(struct esp *esp, struct esp_target_data *tp)
{
	int size = 8 << esp->msg_in[3];
	u8 cfg3;

	if (esp->rev != FASHME)
		goto do_reject;

	if (size != 8 && size != 16)
		goto do_reject;

	if (!(tp->flags & ESP_TGT_NEGO_WIDE))
		goto do_reject;

	cfg3 = tp->esp_config3;
	if (size == 16) {
		tp->flags |= ESP_TGT_WIDE;
		cfg3 |= ESP_CONFIG3_EWIDE;
	} else {
		tp->flags &= ~ESP_TGT_WIDE;
		cfg3 &= ~ESP_CONFIG3_EWIDE;
	}
	tp->esp_config3 = cfg3;
	esp->prev_cfg3 = cfg3;
	esp_write8(cfg3, ESP_CFG3);

	tp->flags &= ~ESP_TGT_NEGO_WIDE;

	spi_period(tp->starget) = 0;
	spi_offset(tp->starget) = 0;
	if (!esp_need_to_nego_sync(tp)) {
		tp->flags &= ~ESP_TGT_CHECK_NEGO;
		scsi_esp_cmd(esp, ESP_CMD_RATN);
	} else {
		esp->msg_out_len =
			spi_populate_sync_msg(&esp->msg_out[0],
					      tp->nego_goal_period,
					      tp->nego_goal_offset);
		tp->flags |= ESP_TGT_NEGO_SYNC;
		scsi_esp_cmd(esp, ESP_CMD_SATN);
	}
	return;

do_reject:
	esp->msg_out[0] = MESSAGE_REJECT;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}

static void esp_msgin_extended(struct esp *esp)
{
	struct esp_cmd_entry *ent = esp->active_cmd;
	struct scsi_cmnd *cmd = ent->cmd;
	struct esp_target_data *tp;
	int tgt = cmd->device->id;

	tp = &esp->target[tgt];
	if (esp->msg_in[2] == EXTENDED_SDTR) {
		esp_msgin_sdtr(esp, tp);
		return;
	}
	if (esp->msg_in[2] == EXTENDED_WDTR) {
		esp_msgin_wdtr(esp, tp);
		return;
	}

	shost_printk(KERN_INFO, esp->host,
		     "Unexpected extended msg type %x\n", esp->msg_in[2]);

	esp->msg_out[0] = ABORT_TASK_SET;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}

/* Analyze msgin bytes received from target so far.  Return non-zero
 * if there are more bytes needed to complete the message.
 */
static int esp_msgin_process(struct esp *esp)
{
	u8 msg0 = esp->msg_in[0];
	int len = esp->msg_in_len;

	if (msg0 & 0x80) {
		/* Identify */
		shost_printk(KERN_INFO, esp->host,
			     "Unexpected msgin identify\n");
		return 0;
	}

	switch (msg0) {
	case EXTENDED_MESSAGE:
		if (len == 1)
			return 1;
		if (len < esp->msg_in[1] + 2)
			return 1;
		esp_msgin_extended(esp);
		return 0;

	case IGNORE_WIDE_RESIDUE: {
		struct esp_cmd_entry *ent;
		struct esp_cmd_priv *spriv;
		if (len == 1)
			return 1;

		if (esp->msg_in[1] != 1)
			goto do_reject;

		ent = esp->active_cmd;
		spriv = ESP_CMD_PRIV(ent->cmd);

		if (spriv->cur_residue == sg_dma_len(spriv->cur_sg)) {
			spriv->cur_sg--;
			spriv->cur_residue = 1;
		} else
			spriv->cur_residue++;
		spriv->tot_residue++;
		return 0;
	}
	case NOP:
		return 0;
	case RESTORE_POINTERS:
		esp_restore_pointers(esp, esp->active_cmd);
		return 0;
	case SAVE_POINTERS:
		esp_save_pointers(esp, esp->active_cmd);
		return 0;

	case COMMAND_COMPLETE:
	case DISCONNECT: {
		struct esp_cmd_entry *ent = esp->active_cmd;

		ent->message = msg0;
		esp_event(esp, ESP_EVENT_FREE_BUS);
		esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
		return 0;
	}
	case MESSAGE_REJECT:
		esp_msgin_reject(esp);
		return 0;

	default:
	do_reject:
		esp->msg_out[0] = MESSAGE_REJECT;
		esp->msg_out_len = 1;
		scsi_esp_cmd(esp, ESP_CMD_SATN);
		return 0;
	}
}

static int esp_process_event(struct esp *esp)
{
	int write, i;

again:
	write = 0;
	esp_log_event("process event %d phase %x\n",
		      esp->event, esp->sreg & ESP_STAT_PMASK);
	switch (esp->event) {
	case ESP_EVENT_CHECK_PHASE:
		switch (esp->sreg & ESP_STAT_PMASK) {
		case ESP_DOP:
			esp_event(esp, ESP_EVENT_DATA_OUT);
			break;
		case ESP_DIP:
			esp_event(esp, ESP_EVENT_DATA_IN);
			break;
		case ESP_STATP:
			esp_flush_fifo(esp);
			scsi_esp_cmd(esp, ESP_CMD_ICCSEQ);
			esp_event(esp, ESP_EVENT_STATUS);
			esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
			return 1;

		case ESP_MOP:
			esp_event(esp, ESP_EVENT_MSGOUT);
			break;

		case ESP_MIP:
			esp_event(esp, ESP_EVENT_MSGIN);
			break;

		case ESP_CMDP:
			esp_event(esp, ESP_EVENT_CMD_START);
			break;

		default:
			shost_printk(KERN_INFO, esp->host,
				     "Unexpected phase, sreg=%02x\n",
				     esp->sreg);
			esp_schedule_reset(esp);
			return 0;
		}
		goto again;

	case ESP_EVENT_DATA_IN:
		write = 1;
		/* fallthru */

	case ESP_EVENT_DATA_OUT: {
		struct esp_cmd_entry *ent = esp->active_cmd;
		struct scsi_cmnd *cmd = ent->cmd;
		dma_addr_t dma_addr = esp_cur_dma_addr(ent, cmd);
		unsigned int dma_len = esp_cur_dma_len(ent, cmd);

		if (esp->rev == ESP100)
			scsi_esp_cmd(esp, ESP_CMD_NULL);

		if (write)
			ent->flags |= ESP_CMD_FLAG_WRITE;
		else
			ent->flags &= ~ESP_CMD_FLAG_WRITE;

		if (esp->ops->dma_length_limit)
			dma_len = esp->ops->dma_length_limit(esp, dma_addr,
							     dma_len);
		else
			dma_len = esp_dma_length_limit(esp, dma_addr, dma_len);

		esp->data_dma_len = dma_len;

		if (!dma_len) {
			shost_printk(KERN_ERR, esp->host,
				     "DMA length is zero!\n");
			shost_printk(KERN_ERR, esp->host,
				     "cur adr[%08llx] len[%08x]\n",
				     (unsigned long long)esp_cur_dma_addr(ent, cmd),
				     esp_cur_dma_len(ent, cmd));
			esp_schedule_reset(esp);
			return 0;
		}

		esp_log_datastart("start data addr[%08llx] len[%u] write(%d)\n",
				  (unsigned long long)dma_addr, dma_len, write);

		esp->ops->send_dma_cmd(esp, dma_addr, dma_len, dma_len,
				       write, ESP_CMD_DMA | ESP_CMD_TI);
		esp_event(esp, ESP_EVENT_DATA_DONE);
		break;
	}
	case ESP_EVENT_DATA_DONE: {
		struct esp_cmd_entry *ent = esp->active_cmd;
		struct scsi_cmnd *cmd = ent->cmd;
		int bytes_sent;

		if (esp->ops->dma_error(esp)) {
			shost_printk(KERN_INFO, esp->host,
				     "data done, DMA error, resetting\n");
			esp_schedule_reset(esp);
			return 0;
		}

		if (ent->flags & ESP_CMD_FLAG_WRITE) {
			/* XXX parity errors, etc. XXX */

			esp->ops->dma_drain(esp);
		}
		esp->ops->dma_invalidate(esp);

		if (esp->ireg != ESP_INTR_BSERV) {
			/* We should always see exactly a bus-service
			 * interrupt at the end of a successful transfer.
			 */
			shost_printk(KERN_INFO, esp->host,
				     "data done, not BSERV, resetting\n");
			esp_schedule_reset(esp);
			return 0;
		}

		bytes_sent = esp_data_bytes_sent(esp, ent, cmd);

		esp_log_datadone("data done flgs[%x] sent[%d]\n",
				 ent->flags, bytes_sent);

		if (bytes_sent < 0) {
			/* XXX force sync mode for this target XXX */
			esp_schedule_reset(esp);
			return 0;
		}

		esp_advance_dma(esp, ent, cmd, bytes_sent);
		esp_event(esp, ESP_EVENT_CHECK_PHASE);
		goto again;
	}

	case ESP_EVENT_STATUS: {
		struct esp_cmd_entry *ent = esp->active_cmd;

		if (esp->ireg & ESP_INTR_FDONE) {
			ent->status = esp_read8(ESP_FDATA);
			ent->message = esp_read8(ESP_FDATA);
			scsi_esp_cmd(esp, ESP_CMD_MOK);
		} else if (esp->ireg == ESP_INTR_BSERV) {
			ent->status = esp_read8(ESP_FDATA);
			ent->message = 0xff;
			esp_event(esp, ESP_EVENT_MSGIN);
			return 0;
		}

		if (ent->message != COMMAND_COMPLETE) {
			shost_printk(KERN_INFO, esp->host,
				     "Unexpected message %x in status\n",
				     ent->message);
			esp_schedule_reset(esp);
			return 0;
		}

		esp_event(esp, ESP_EVENT_FREE_BUS);
		esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
		break;
	}
	case ESP_EVENT_FREE_BUS: {
		struct esp_cmd_entry *ent = esp->active_cmd;
		struct scsi_cmnd *cmd = ent->cmd;

		if (ent->message == COMMAND_COMPLETE ||
		    ent->message == DISCONNECT)
			scsi_esp_cmd(esp, ESP_CMD_ESEL);

		if (ent->message == COMMAND_COMPLETE) {
			esp_log_cmddone("Command done status[%x] message[%x]\n",
					ent->status, ent->message);
			if (ent->status == SAM_STAT_TASK_SET_FULL)
				esp_event_queue_full(esp, ent);

			if (ent->status == SAM_STAT_CHECK_CONDITION &&
			    !(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) {
				ent->flags |= ESP_CMD_FLAG_AUTOSENSE;
				esp_autosense(esp, ent);
			} else {
				esp_cmd_is_done(esp, ent, cmd,
						compose_result(ent->status,
							       ent->message,
							       DID_OK));
			}
		} else if (ent->message == DISCONNECT) {
			esp_log_disconnect("Disconnecting tgt[%d] tag[%x:%x]\n",
					   cmd->device->id,
					   ent->tag[0], ent->tag[1]);

			esp->active_cmd = NULL;
			esp_maybe_execute_command(esp);
		} else {
			shost_printk(KERN_INFO, esp->host,
				     "Unexpected message %x in freebus\n",
				     ent->message);
			esp_schedule_reset(esp);
			return 0;
		}
		if (esp->active_cmd)
			esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
		break;
	}
	case ESP_EVENT_MSGOUT: {
		scsi_esp_cmd(esp, ESP_CMD_FLUSH);

		if (esp_debug & ESP_DEBUG_MSGOUT) {
			int i;
			printk("ESP: Sending message [ ");
			for (i = 0; i < esp->msg_out_len; i++)
				printk("%02x ", esp->msg_out[i]);
			printk("]\n");
		}

		if (esp->rev == FASHME) {
			int i;

			/* Always use the fifo.  */
			for (i = 0; i < esp->msg_out_len; i++) {
				esp_write8(esp->msg_out[i], ESP_FDATA);
				esp_write8(0, ESP_FDATA);
			}
			scsi_esp_cmd(esp, ESP_CMD_TI);
		} else {
			if (esp->msg_out_len == 1) {
				esp_write8(esp->msg_out[0], ESP_FDATA);
				scsi_esp_cmd(esp, ESP_CMD_TI);
			} else if (esp->flags & ESP_FLAG_USE_FIFO) {
				for (i = 0; i < esp->msg_out_len; i++)
					esp_write8(esp->msg_out[i], ESP_FDATA);
				scsi_esp_cmd(esp, ESP_CMD_TI);
			} else {
				/* Use DMA. */
				memcpy(esp->command_block,
				       esp->msg_out,
				       esp->msg_out_len);

				esp->ops->send_dma_cmd(esp,
						       esp->command_block_dma,
						       esp->msg_out_len,
						       esp->msg_out_len,
						       0,
						       ESP_CMD_DMA|ESP_CMD_TI);
			}
		}
		esp_event(esp, ESP_EVENT_MSGOUT_DONE);
		break;
	}
	case ESP_EVENT_MSGOUT_DONE:
		if (esp->rev == FASHME) {
			scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		} else {
			if (esp->msg_out_len > 1)
				esp->ops->dma_invalidate(esp);
		}

		if (!(esp->ireg & ESP_INTR_DC)) {
			if (esp->rev != FASHME)
				scsi_esp_cmd(esp, ESP_CMD_NULL);
		}
		esp_event(esp, ESP_EVENT_CHECK_PHASE);
		goto again;
	case ESP_EVENT_MSGIN:
		if (esp->ireg & ESP_INTR_BSERV) {
			if (esp->rev == FASHME) {
				if (!(esp_read8(ESP_STATUS2) &
				      ESP_STAT2_FEMPTY))
					scsi_esp_cmd(esp, ESP_CMD_FLUSH);
			} else {
				scsi_esp_cmd(esp, ESP_CMD_FLUSH);
				if (esp->rev == ESP100)
					scsi_esp_cmd(esp, ESP_CMD_NULL);
			}
			scsi_esp_cmd(esp, ESP_CMD_TI);
			esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
			return 1;
		}
		if (esp->ireg & ESP_INTR_FDONE) {
			u8 val;

			if (esp->rev == FASHME)
				val = esp->fifo[0];
			else
				val = esp_read8(ESP_FDATA);
			esp->msg_in[esp->msg_in_len++] = val;

			esp_log_msgin("Got msgin byte %x\n", val);

			if (!esp_msgin_process(esp))
				esp->msg_in_len = 0;

			if (esp->rev == FASHME)
				scsi_esp_cmd(esp, ESP_CMD_FLUSH);

			scsi_esp_cmd(esp, ESP_CMD_MOK);

			if (esp->event != ESP_EVENT_FREE_BUS)
				esp_event(esp, ESP_EVENT_CHECK_PHASE);
		} else {
			shost_printk(KERN_INFO, esp->host,
				     "MSGIN neither BSERV nor FDONE, resetting\n");
			esp_schedule_reset(esp);
			return 0;
		}
		break;
	case ESP_EVENT_CMD_START:
		memcpy(esp->command_block, esp->cmd_bytes_ptr,
		       esp->cmd_bytes_left);
		esp_send_dma_cmd(esp, esp->cmd_bytes_left, 16, ESP_CMD_TI);
		esp_event(esp, ESP_EVENT_CMD_DONE);
		esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
		break;
	case ESP_EVENT_CMD_DONE:
		esp->ops->dma_invalidate(esp);
		if (esp->ireg & ESP_INTR_BSERV) {
			esp_event(esp, ESP_EVENT_CHECK_PHASE);
			goto again;
		}
		esp_schedule_reset(esp);
		return 0;

	case ESP_EVENT_RESET:
		scsi_esp_cmd(esp, ESP_CMD_RS);
		break;

	default:
		shost_printk(KERN_INFO, esp->host,
			     "Unexpected event %x, resetting\n", esp->event);
		esp_schedule_reset(esp);
		return 0;
	}
	return 1;
}
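
/* Happy-path event flow for a simple data command, as driven by the
 * switch above:
 *
 *	CHECK_PHASE -> DATA_IN/DATA_OUT -> DATA_DONE -> CHECK_PHASE
 *		-> STATUS -> FREE_BUS
 */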
2040
2041static void esp_reset_cleanup_one(struct esp *esp, struct esp_cmd_entry *ent)
2042{
2043	struct scsi_cmnd *cmd = ent->cmd;
2044
2045	esp_unmap_dma(esp, cmd);
2046	esp_free_lun_tag(ent, cmd->device->hostdata);
2047	cmd->result = DID_RESET << 16;
2048
2049	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
2050		esp->ops->unmap_single(esp, ent->sense_dma,
2051				       SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
2052		ent->sense_ptr = NULL;
2053	}
2054
2055	cmd->scsi_done(cmd);
2056	list_del(&ent->list);
2057	esp_put_ent(esp, ent);
2058}
2059
2060static void esp_clear_hold(struct scsi_device *dev, void *data)
2061{
2062	struct esp_lun_data *lp = dev->hostdata;
2063
2064	BUG_ON(lp->num_tagged);
2065	lp->hold = 0;
2066}
2067
2068static void esp_reset_cleanup(struct esp *esp)
2069{
2070	struct esp_cmd_entry *ent, *tmp;
2071	int i;
2072
2073	list_for_each_entry_safe(ent, tmp, &esp->queued_cmds, list) {
2074		struct scsi_cmnd *cmd = ent->cmd;
2075
2076		list_del(&ent->list);
2077		cmd->result = DID_RESET << 16;
2078		cmd->scsi_done(cmd);
2079		esp_put_ent(esp, ent);
2080	}
2081
2082	list_for_each_entry_safe(ent, tmp, &esp->active_cmds, list) {
2083		if (ent == esp->active_cmd)
2084			esp->active_cmd = NULL;
2085		esp_reset_cleanup_one(esp, ent);
2086	}
2087
2088	BUG_ON(esp->active_cmd != NULL);
2089
2090	/* Force renegotiation of sync/wide transfers.  */
2091	for (i = 0; i < ESP_MAX_TARGET; i++) {
2092		struct esp_target_data *tp = &esp->target[i];
2093
2094		tp->esp_period = 0;
2095		tp->esp_offset = 0;
2096		tp->esp_config3 &= ~(ESP_CONFIG3_EWIDE |
2097				     ESP_CONFIG3_FSCSI |
2098				     ESP_CONFIG3_FAST);
2099		tp->flags &= ~ESP_TGT_WIDE;
2100		tp->flags |= ESP_TGT_CHECK_NEGO;
2101
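		/* Drop any per-LUN hold state as well; the reset has just
		 * completed every command that could have been holding a
		 * LUN busy.
		 */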
		if (tp->starget)
			__starget_for_each_device(tp->starget, NULL,
						  esp_clear_hold);
	}
	esp->flags &= ~ESP_FLAG_RESETTING;
}

/* Runs under host->lock */
static void __esp_interrupt(struct esp *esp)
{
	int finish_reset, intr_done;
	u8 phase;

	/* Once INTRPT is read, STATUS and SSTEP are cleared.  */
	esp->sreg = esp_read8(ESP_STATUS);
	esp->seqreg = esp_read8(ESP_SSTEP);
	esp->ireg = esp_read8(ESP_INTRPT);

	if (esp->flags & ESP_FLAG_RESETTING) {
		finish_reset = 1;
	} else {
		if (esp_check_gross_error(esp))
			return;

		finish_reset = esp_check_spur_intr(esp);
		if (finish_reset < 0)
			return;
	}

	if (esp->ireg & ESP_INTR_SR)
		finish_reset = 1;

	if (finish_reset) {
		esp_reset_cleanup(esp);
		if (esp->eh_reset) {
			complete(esp->eh_reset);
			esp->eh_reset = NULL;
		}
		return;
	}

	phase = (esp->sreg & ESP_STAT_PMASK);
	if (esp->rev == FASHME) {
		if (((phase != ESP_DIP && phase != ESP_DOP) &&
		     esp->select_state == ESP_SELECT_NONE &&
		     esp->event != ESP_EVENT_STATUS &&
		     esp->event != ESP_EVENT_DATA_DONE) ||
		    (esp->ireg & ESP_INTR_RSEL)) {
			esp->sreg2 = esp_read8(ESP_STATUS2);
			if (!(esp->sreg2 & ESP_STAT2_FEMPTY) ||
			    (esp->sreg2 & ESP_STAT2_F1BYTE))
				hme_read_fifo(esp);
		}
	}

	esp_log_intr("intr sreg[%02x] seqreg[%02x] "
		     "sreg2[%02x] ireg[%02x]\n",
		     esp->sreg, esp->seqreg, esp->sreg2, esp->ireg);

	intr_done = 0;

	if (esp->ireg & (ESP_INTR_S | ESP_INTR_SATN | ESP_INTR_IC)) {
		shost_printk(KERN_INFO, esp->host,
			     "unexpected IREG %02x\n", esp->ireg);
		if (esp->ireg & ESP_INTR_IC)
			esp_dump_cmd_log(esp);

		esp_schedule_reset(esp);
	} else {
		if (!(esp->ireg & ESP_INTR_RSEL)) {
			/* Some combination of FDONE, BSERV, DC.  */
			if (esp->select_state != ESP_SELECT_NONE)
				intr_done = esp_finish_select(esp);
		} else {
			if (esp->active_cmd)
				(void) esp_finish_select(esp);
			intr_done = esp_reconnect(esp);
		}
	}
	while (!intr_done)
		intr_done = esp_process_event(esp);
}

irqreturn_t scsi_esp_intr(int irq, void *dev_id)
{
	struct esp *esp = dev_id;
	unsigned long flags;
	irqreturn_t ret;

	spin_lock_irqsave(esp->host->host_lock, flags);
	ret = IRQ_NONE;
	if (esp->ops->irq_pending(esp)) {
		ret = IRQ_HANDLED;
		for (;;) {
			int i;

			__esp_interrupt(esp);
			if (!(esp->flags & ESP_FLAG_QUICKIRQ_CHECK))
				break;
			esp->flags &= ~ESP_FLAG_QUICKIRQ_CHECK;

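			/* An event handler hinted that another interrupt
			 * should arrive almost immediately, so poll the
			 * chip briefly here rather than pay for a whole
			 * extra trip through the interrupt path.
			 */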
			for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) {
				if (esp->ops->irq_pending(esp))
					break;
			}
			if (i == ESP_QUICKIRQ_LIMIT)
				break;
		}
	}
	spin_unlock_irqrestore(esp->host->host_lock, flags);

	return ret;
}
EXPORT_SYMBOL(scsi_esp_intr);

static void esp_get_revision(struct esp *esp)
{
	u8 val;

	esp->config1 = (ESP_CONFIG1_PENABLE | (esp->scsi_id & 7));
	if (esp->config2 == 0) {
		esp->config2 = (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY);
		esp_write8(esp->config2, ESP_CFG2);

		val = esp_read8(ESP_CFG2);
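		/* Mask off the cfg2 "magic" bits, which do not necessarily
		 * read back as written, before comparing.
		 */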
		val &= ~ESP_CONFIG2_MAGIC;

		esp->config2 = 0;
		if (val != (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY)) {
			/*
			 * If what we write to cfg2 does not come back,
			 * cfg2 is not implemented.
			 * Therefore this must be a plain esp100.
			 */
			esp->rev = ESP100;
			return;
		}
	}

	esp_set_all_config3(esp, 5);
	esp->prev_cfg3 = 5;
	esp_write8(esp->config2, ESP_CFG2);
	esp_write8(0, ESP_CFG3);
	esp_write8(esp->prev_cfg3, ESP_CFG3);

	val = esp_read8(ESP_CFG3);
	if (val != 5) {
		/* The cfg2 register is implemented but cfg3 is not,
		 * so this must be an ESP100A.
		 */
		esp->rev = ESP100A;
	} else {
		esp_set_all_config3(esp, 0);
		esp->prev_cfg3 = 0;
		esp_write8(esp->prev_cfg3, ESP_CFG3);

		/* All of cfg{1,2,3} are implemented, so this must be one
		 * of the FAS variants; figure out which one.
		 */
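		/* A conversion factor above ESP_CCF_F5 (with cfact == 0
		 * encoding a factor of 8, see esp_set_clock_params())
		 * corresponds to an input clock above 25MHz, so treat
		 * such chips as the fast variant.
		 */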
		if (esp->cfact == 0 || esp->cfact > ESP_CCF_F5) {
			esp->rev = FAST;
			esp->sync_defp = SYNC_DEFP_FAST;
		} else {
			esp->rev = ESP236;
		}
	}
}

static void esp_init_swstate(struct esp *esp)
{
	int i;

	INIT_LIST_HEAD(&esp->queued_cmds);
	INIT_LIST_HEAD(&esp->active_cmds);
	INIT_LIST_HEAD(&esp->esp_cmd_pool);

	/* Start with a clear state, domain validation (via ->slave_configure,
	 * spi_dv_device()) will attempt to enable SYNC, WIDE, and tagged
	 * commands.
	 */
	for (i = 0; i < ESP_MAX_TARGET; i++) {
		esp->target[i].flags = 0;
		esp->target[i].nego_goal_period = 0;
		esp->target[i].nego_goal_offset = 0;
		esp->target[i].nego_goal_width = 0;
		esp->target[i].nego_goal_tags = 0;
	}
}

/* This places the ESP into a known state at boot time. */
static void esp_bootup_reset(struct esp *esp)
{
	u8 val;

	/* Reset the DMA */
	esp->ops->reset_dma(esp);

	/* Reset the ESP */
	esp_reset_esp(esp);

	/* Reset the SCSI bus, but tell ESP not to generate an irq */
	val = esp_read8(ESP_CFG1);
	val |= ESP_CONFIG1_SRRDISAB;
	esp_write8(val, ESP_CFG1);

	scsi_esp_cmd(esp, ESP_CMD_RS);
	udelay(400);

	esp_write8(esp->config1, ESP_CFG1);

	/* Eat any bitrot in the chip and we are done... */
	esp_read8(ESP_INTRPT);
}

static void esp_set_clock_params(struct esp *esp)
{
	int fhz;
	u8 ccf;

	/* This is getting messy but it has to be done correctly or else
	 * you get weird behavior all over the place.  We are trying to
	 * figure out three pieces of information.
	 *
	 * a) Clock Conversion Factor
	 *
	 *    This is a representation of the input crystal clock frequency
	 *    going into the ESP on this machine.  Any operation whose timing
	 *    is longer than 400ns depends on this value being correct.  For
	 *    example, you'll get blips for arbitration/selection during high
	 *    load or with multiple targets if this is not set correctly.
	 *
	 * b) Selection Time-Out
	 *
	 *    The ESP isn't very bright and will arbitrate for the bus and try
	 *    to select a target forever if you let it.  This value tells the
	 *    ESP when it has taken too long to negotiate and that it should
	 *    interrupt the CPU so we can see what happened.  The value is
	 *    computed as follows (from NCR/Symbios chip docs).
	 *
	 *          (Time Out Period) *  (Input Clock)
	 *    STO = ----------------------------------
	 *          (8192) * (Clock Conversion Factor)
	 *
	 *    We use a time out period of 250ms (ESP_BUS_TIMEOUT).
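	 *
	 *    As an illustrative example only: with a 40MHz input clock the
	 *    code below computes ccf = 8, and the formula above gives
	 *    STO = (0.25 * 40000000) / (8192 * 8), roughly 153.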
	 *
	 * c) Empirical constants for synchronous offset and transfer period
	 *    register values
	 *
	 *    This entails the smallest and largest sync period we could ever
	 *    handle on this ESP.
	 */
	fhz = esp->cfreq;

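	/* This rounds the crystal frequency up to the next multiple of
	 * 5MHz: e.g. a 24MHz clock yields ccf = (24 + 4) / 5 = 5.  A
	 * conversion factor of 1 is never used, so bump it to 2.
	 */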
	ccf = ((fhz / 1000000) + 4) / 5;
	if (ccf == 1)
		ccf = 2;

	/* If we can't find anything reasonable, just assume 20MHz.
	 * This is the clock frequency of the older sun4c machines, where
	 * I've been unable to find the clock-frequency PROM property.  All
	 * other machines seem to provide useful values.
	 */
	if (fhz <= 5000000 || ccf < 1 || ccf > 8) {
		fhz = 20000000;
		ccf = 4;
	}
	esp->cfact = (ccf == 8 ? 0 : ccf);
	esp->cfreq = fhz;
	esp->ccycle = ESP_HZ_TO_CYCLE(fhz);
	esp->ctick = ESP_TICK(ccf, esp->ccycle);
	esp->neg_defp = ESP_NEG_DEFP(fhz, ccf);
	esp->sync_defp = SYNC_DEFP_SLOW;
}

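/* Indexed by the esp->rev chip revision values. */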
static const char *esp_chip_names[] = {
	"ESP100",
	"ESP100A",
	"ESP236",
	"FAS236",
	"FAS100A",
	"FAST",
	"FASHME",
	"AM53C974",
};

static struct scsi_transport_template *esp_transport_template;

int scsi_esp_register(struct esp *esp, struct device *dev)
{
	static int instance;
	int err;

	if (!esp->num_tags)
		esp->num_tags = ESP_DEFAULT_TAGS;
	esp->host->transportt = esp_transport_template;
	esp->host->max_lun = ESP_MAX_LUN;
	esp->host->cmd_per_lun = 2;
	esp->host->unique_id = instance;

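	/* Order matters here: the clock parameters feed the revision
	 * probe (esp_get_revision() checks esp->cfact), so
	 * esp_set_clock_params() must run first.
	 */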
	esp_set_clock_params(esp);

	esp_get_revision(esp);

	esp_init_swstate(esp);

	esp_bootup_reset(esp);

	dev_printk(KERN_INFO, dev, "esp%u: regs[%1p:%1p] irq[%u]\n",
		   esp->host->unique_id, esp->regs, esp->dma_regs,
		   esp->host->irq);
	dev_printk(KERN_INFO, dev,
		   "esp%u: is a %s, %u MHz (ccf=%u), SCSI ID %u\n",
		   esp->host->unique_id, esp_chip_names[esp->rev],
		   esp->cfreq / 1000000, esp->cfact, esp->scsi_id);

	/* Let the SCSI bus reset settle. */
	ssleep(esp_bus_reset_settle);

	err = scsi_add_host(esp->host, dev);
	if (err)
		return err;

	instance++;

	scsi_scan_host(esp->host);

	return 0;
}
EXPORT_SYMBOL(scsi_esp_register);

void scsi_esp_unregister(struct esp *esp)
{
	scsi_remove_host(esp->host);
}
EXPORT_SYMBOL(scsi_esp_unregister);

static int esp_target_alloc(struct scsi_target *starget)
{
	struct esp *esp = shost_priv(dev_to_shost(&starget->dev));
	struct esp_target_data *tp = &esp->target[starget->id];

	tp->starget = starget;

	return 0;
}

static void esp_target_destroy(struct scsi_target *starget)
{
	struct esp *esp = shost_priv(dev_to_shost(&starget->dev));
	struct esp_target_data *tp = &esp->target[starget->id];

	tp->starget = NULL;
}

static int esp_slave_alloc(struct scsi_device *dev)
{
	struct esp *esp = shost_priv(dev->host);
	struct esp_target_data *tp = &esp->target[dev->id];
	struct esp_lun_data *lp;

	lp = kzalloc(sizeof(*lp), GFP_KERNEL);
	if (!lp)
		return -ENOMEM;
	dev->hostdata = lp;

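	/* Advertise our transfer limits to the SPI transport class;
	 * domain validation run from ->slave_configure negotiates
	 * within these bounds.
	 */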
	spi_min_period(tp->starget) = esp->min_period;
	spi_max_offset(tp->starget) = 15;

	if (esp->flags & ESP_FLAG_WIDE_CAPABLE)
		spi_max_width(tp->starget) = 1;
	else
		spi_max_width(tp->starget) = 0;

	return 0;
}

static int esp_slave_configure(struct scsi_device *dev)
{
	struct esp *esp = shost_priv(dev->host);
	struct esp_target_data *tp = &esp->target[dev->id];

	if (dev->tagged_supported)
		scsi_change_queue_depth(dev, esp->num_tags);

	tp->flags |= ESP_TGT_DISCONNECT;

	if (!spi_initial_dv(dev->sdev_target))
		spi_dv_device(dev);

	return 0;
}

static void esp_slave_destroy(struct scsi_device *dev)
{
	struct esp_lun_data *lp = dev->hostdata;

	kfree(lp);
	dev->hostdata = NULL;
}

static int esp_eh_abort_handler(struct scsi_cmnd *cmd)
{
	struct esp *esp = shost_priv(cmd->device->host);
	struct esp_cmd_entry *ent, *tmp;
	struct completion eh_done;
	unsigned long flags;

	/* XXX This helps a lot with debugging but might be a bit
	 * XXX much for the final driver.
	 */
	spin_lock_irqsave(esp->host->host_lock, flags);
	shost_printk(KERN_ERR, esp->host, "Aborting command [%p:%02x]\n",
		     cmd, cmd->cmnd[0]);
	ent = esp->active_cmd;
	if (ent)
		shost_printk(KERN_ERR, esp->host,
			     "Current command [%p:%02x]\n",
			     ent->cmd, ent->cmd->cmnd[0]);
	list_for_each_entry(ent, &esp->queued_cmds, list) {
		shost_printk(KERN_ERR, esp->host, "Queued command [%p:%02x]\n",
			     ent->cmd, ent->cmd->cmnd[0]);
	}
	list_for_each_entry(ent, &esp->active_cmds, list) {
		shost_printk(KERN_ERR, esp->host, " Active command [%p:%02x]\n",
			     ent->cmd, ent->cmd->cmnd[0]);
	}
	esp_dump_cmd_log(esp);
	spin_unlock_irqrestore(esp->host->host_lock, flags);

	spin_lock_irqsave(esp->host->host_lock, flags);

	ent = NULL;
	list_for_each_entry(tmp, &esp->queued_cmds, list) {
		if (tmp->cmd == cmd) {
			ent = tmp;
			break;
		}
	}

	if (ent) {
		/* Easiest case, we didn't even issue the command
		 * yet so it is trivial to abort.
		 */
		list_del(&ent->list);

		cmd->result = DID_ABORT << 16;
		cmd->scsi_done(cmd);

		esp_put_ent(esp, ent);

		goto out_success;
	}

	init_completion(&eh_done);

	ent = esp->active_cmd;
	if (ent && ent->cmd == cmd) {
		/* Command is the currently active command on
		 * the bus.  If we already have an output message
		 * pending, no dice.
		 */
		if (esp->msg_out_len)
			goto out_failure;

		/* Send out an abort, encouraging the target to
		 * go to MSGOUT phase by asserting ATN.
		 */
		esp->msg_out[0] = ABORT_TASK_SET;
		esp->msg_out_len = 1;
		ent->eh_done = &eh_done;

		scsi_esp_cmd(esp, ESP_CMD_SATN);
	} else {
		/* The command is disconnected.  This is not easy to
		 * abort.  For now we fail and let the scsi error
		 * handling layer try a scsi bus reset or host
		 * reset.
		 *
		 * What we could do is put together a scsi command
		 * solely for the purpose of sending an abort message
		 * to the target.  Coming up with all the code to
		 * cook up scsi commands, special case them everywhere,
		 * etc. is for questionable gain and it would be better
		 * if the generic scsi error handling layer could do at
		 * least some of that for us.
		 *
		 * Anyway, this is an area for potential future improvement
		 * in this driver.
		 */
		goto out_failure;
	}

	spin_unlock_irqrestore(esp->host->host_lock, flags);

	if (!wait_for_completion_timeout(&eh_done, 5 * HZ)) {
		spin_lock_irqsave(esp->host->host_lock, flags);
		ent->eh_done = NULL;
		spin_unlock_irqrestore(esp->host->host_lock, flags);

		return FAILED;
	}

	return SUCCESS;

out_success:
	spin_unlock_irqrestore(esp->host->host_lock, flags);
	return SUCCESS;

out_failure:
	/* XXX This might be a good location to set ESP_TGT_BROKEN
	 * XXX since we know which target/lun in particular is
	 * XXX causing trouble.
	 */
	spin_unlock_irqrestore(esp->host->host_lock, flags);
	return FAILED;
}

static int esp_eh_bus_reset_handler(struct scsi_cmnd *cmd)
{
	struct esp *esp = shost_priv(cmd->device->host);
	struct completion eh_reset;
	unsigned long flags;

	init_completion(&eh_reset);

	spin_lock_irqsave(esp->host->host_lock, flags);

	esp->eh_reset = &eh_reset;

	/* XXX This is too simple... We should add lots of
	 * XXX checks here so that if we find that the chip is
	 * XXX very wedged we return failure immediately so
	 * XXX that we can perform a full chip reset.
	 */
	esp->flags |= ESP_FLAG_RESETTING;
	scsi_esp_cmd(esp, ESP_CMD_RS);

	spin_unlock_irqrestore(esp->host->host_lock, flags);

	ssleep(esp_bus_reset_settle);

	if (!wait_for_completion_timeout(&eh_reset, 5 * HZ)) {
		spin_lock_irqsave(esp->host->host_lock, flags);
		esp->eh_reset = NULL;
		spin_unlock_irqrestore(esp->host->host_lock, flags);

		return FAILED;
	}

	return SUCCESS;
}

/* All bets are off, reset the entire device.  */
static int esp_eh_host_reset_handler(struct scsi_cmnd *cmd)
{
	struct esp *esp = shost_priv(cmd->device->host);
	unsigned long flags;

	spin_lock_irqsave(esp->host->host_lock, flags);
	esp_bootup_reset(esp);
	esp_reset_cleanup(esp);
	spin_unlock_irqrestore(esp->host->host_lock, flags);

	ssleep(esp_bus_reset_settle);

	return SUCCESS;
}

static const char *esp_info(struct Scsi_Host *host)
{
	return "esp";
}

struct scsi_host_template scsi_esp_template = {
	.module			= THIS_MODULE,
	.name			= "esp",
	.info			= esp_info,
	.queuecommand		= esp_queuecommand,
	.target_alloc		= esp_target_alloc,
	.target_destroy		= esp_target_destroy,
	.slave_alloc		= esp_slave_alloc,
	.slave_configure	= esp_slave_configure,
	.slave_destroy		= esp_slave_destroy,
	.eh_abort_handler	= esp_eh_abort_handler,
	.eh_bus_reset_handler	= esp_eh_bus_reset_handler,
	.eh_host_reset_handler	= esp_eh_host_reset_handler,
	.can_queue		= 7,
	.this_id		= 7,
	.sg_tablesize		= SG_ALL,
	.use_clustering		= ENABLE_CLUSTERING,
	.max_sectors		= 0xffff,
	.skip_settle_delay	= 1,
	.use_blk_tags		= 1,
};
EXPORT_SYMBOL(scsi_esp_template);

static void esp_get_signalling(struct Scsi_Host *host)
{
	struct esp *esp = shost_priv(host);
	enum spi_signal_type type;

	if (esp->flags & ESP_FLAG_DIFFERENTIAL)
		type = SPI_SIGNAL_HVD;
	else
		type = SPI_SIGNAL_SE;

	spi_signalling(host) = type;
}

static void esp_set_offset(struct scsi_target *target, int offset)
{
	struct Scsi_Host *host = dev_to_shost(target->dev.parent);
	struct esp *esp = shost_priv(host);
	struct esp_target_data *tp = &esp->target[target->id];

	if (esp->flags & ESP_FLAG_DISABLE_SYNC)
		tp->nego_goal_offset = 0;
	else
		tp->nego_goal_offset = offset;
	tp->flags |= ESP_TGT_CHECK_NEGO;
}

static void esp_set_period(struct scsi_target *target, int period)
{
	struct Scsi_Host *host = dev_to_shost(target->dev.parent);
	struct esp *esp = shost_priv(host);
	struct esp_target_data *tp = &esp->target[target->id];

	tp->nego_goal_period = period;
	tp->flags |= ESP_TGT_CHECK_NEGO;
}

static void esp_set_width(struct scsi_target *target, int width)
{
	struct Scsi_Host *host = dev_to_shost(target->dev.parent);
	struct esp *esp = shost_priv(host);
	struct esp_target_data *tp = &esp->target[target->id];

	tp->nego_goal_width = (width ? 1 : 0);
	tp->flags |= ESP_TGT_CHECK_NEGO;
}

static struct spi_function_template esp_transport_ops = {
	.set_offset		= esp_set_offset,
	.show_offset		= 1,
	.set_period		= esp_set_period,
	.show_period		= 1,
	.set_width		= esp_set_width,
	.show_width		= 1,
	.get_signalling		= esp_get_signalling,
};

static int __init esp_init(void)
{
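	/* Per-command driver state lives in the scsi_pointer area of
	 * each scsi_cmnd, so that area must be large enough to hold an
	 * esp_cmd_priv.
	 */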
	BUILD_BUG_ON(sizeof(struct scsi_pointer) <
		     sizeof(struct esp_cmd_priv));

	esp_transport_template = spi_attach_transport(&esp_transport_ops);
	if (!esp_transport_template)
		return -ENODEV;

	return 0;
}

static void __exit esp_exit(void)
{
	spi_release_transport(esp_transport_template);
}

MODULE_DESCRIPTION("ESP SCSI driver core");
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

module_param(esp_bus_reset_settle, int, 0);
MODULE_PARM_DESC(esp_bus_reset_settle,
		 "ESP scsi bus reset delay in seconds");

module_param(esp_debug, int, 0);
MODULE_PARM_DESC(esp_debug,
"ESP bitmapped debugging message enable value:\n"
"	0x00000001	Log interrupt events\n"
"	0x00000002	Log scsi commands\n"
"	0x00000004	Log resets\n"
"	0x00000008	Log message in events\n"
"	0x00000010	Log message out events\n"
"	0x00000020	Log command completion\n"
"	0x00000040	Log disconnects\n"
"	0x00000080	Log data start\n"
"	0x00000100	Log data done\n"
"	0x00000200	Log reconnects\n"
"	0x00000400	Log auto-sense data\n"
"	0x00000800	Log driver events\n"
"	0x00001000	Log ESP commands\n"
);

module_init(esp_init);
module_exit(esp_exit);