This source file includes the following definitions:
- esp_log_fill_regs
- scsi_esp_cmd
- esp_send_dma_cmd
- esp_event
- esp_dump_cmd_log
- esp_flush_fifo
- hme_read_fifo
- esp_set_all_config3
- esp_reset_esp
- esp_map_dma
- esp_cur_dma_addr
- esp_cur_dma_len
- esp_advance_dma
- esp_unmap_dma
- esp_save_pointers
- esp_restore_pointers
- esp_write_tgt_config3
- esp_write_tgt_sync
- esp_dma_length_limit
- esp_need_to_nego_wide
- esp_need_to_nego_sync
- esp_alloc_lun_tag
- esp_free_lun_tag
- esp_map_sense
- esp_unmap_sense
- esp_autosense
- find_and_prep_issuable_command
- esp_maybe_execute_command
- esp_get_ent
- esp_put_ent
- esp_cmd_is_done
- compose_result
- esp_event_queue_full
- esp_queuecommand_lck
- DEF_SCSI_QCMD
- esp_check_spur_intr
- esp_schedule_reset
- esp_reconnect_with_tag
- esp_reconnect
- esp_finish_select
- esp_data_bytes_sent
- esp_setsync
- esp_msgin_reject
- esp_msgin_sdtr
- esp_msgin_wdtr
- esp_msgin_extended
- esp_msgin_process
- esp_process_event
- esp_reset_cleanup_one
- esp_clear_hold
- esp_reset_cleanup
- __esp_interrupt
- scsi_esp_intr
- esp_get_revision
- esp_init_swstate
- esp_bootup_reset
- esp_set_clock_params
- scsi_esp_register
- scsi_esp_unregister
- esp_target_alloc
- esp_target_destroy
- esp_slave_alloc
- esp_slave_configure
- esp_slave_destroy
- esp_eh_abort_handler
- esp_eh_bus_reset_handler
- esp_eh_host_reset_handler
- esp_info
- esp_get_signalling
- esp_set_offset
- esp_set_period
- esp_set_width
- esp_init
- esp_exit
- esp_wait_for_fifo
- esp_wait_for_intr
- esp_send_pio_cmd
// SPDX-License-Identifier: GPL-2.0-only
/* esp_scsi.c: ESP SCSI driver.
 *
 * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/completion.h>
#include <linux/kallsyms.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/irqreturn.h>

#include <asm/irq.h>
#include <asm/io.h>
#include <asm/dma.h>

#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_transport_spi.h>

#include "esp_scsi.h"

#define DRV_MODULE_NAME		"esp"
#define PFX DRV_MODULE_NAME	": "
#define DRV_VERSION		"2.000"
#define DRV_MODULE_RELDATE	"April 19, 2007"


static int esp_bus_reset_settle = 3;

static u32 esp_debug;
#define ESP_DEBUG_INTR		0x00000001
#define ESP_DEBUG_SCSICMD	0x00000002
#define ESP_DEBUG_RESET		0x00000004
#define ESP_DEBUG_MSGIN		0x00000008
#define ESP_DEBUG_MSGOUT	0x00000010
#define ESP_DEBUG_CMDDONE	0x00000020
#define ESP_DEBUG_DISCONNECT	0x00000040
#define ESP_DEBUG_DATASTART	0x00000080
#define ESP_DEBUG_DATADONE	0x00000100
#define ESP_DEBUG_RECONNECT	0x00000200
#define ESP_DEBUG_AUTOSENSE	0x00000400
#define ESP_DEBUG_EVENT		0x00000800
#define ESP_DEBUG_COMMAND	0x00001000

#define esp_log_intr(f, a...) \
do {	if (esp_debug & ESP_DEBUG_INTR) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_reset(f, a...) \
do {	if (esp_debug & ESP_DEBUG_RESET) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_msgin(f, a...) \
do {	if (esp_debug & ESP_DEBUG_MSGIN) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_msgout(f, a...) \
do {	if (esp_debug & ESP_DEBUG_MSGOUT) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_cmddone(f, a...) \
do {	if (esp_debug & ESP_DEBUG_CMDDONE) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_disconnect(f, a...) \
do {	if (esp_debug & ESP_DEBUG_DISCONNECT) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_datastart(f, a...) \
do {	if (esp_debug & ESP_DEBUG_DATASTART) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_datadone(f, a...) \
do {	if (esp_debug & ESP_DEBUG_DATADONE) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_reconnect(f, a...) \
do {	if (esp_debug & ESP_DEBUG_RECONNECT) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_autosense(f, a...) \
do {	if (esp_debug & ESP_DEBUG_AUTOSENSE) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_event(f, a...) \
do {	if (esp_debug & ESP_DEBUG_EVENT) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_command(f, a...) \
do {	if (esp_debug & ESP_DEBUG_COMMAND) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

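/*
 * esp_debug is a bitmask, so several of the debug classes above can be
 * enabled at once.  For example, tracing interrupts together with the
 * event state machine means setting
 *
 *	esp_debug = ESP_DEBUG_INTR | ESP_DEBUG_EVENT;	(0x00000801)
 *
 * Each esp_log_*() macro is a shost_printk() guarded by its bit, so a
 * disabled class costs only a test and a branch.
 */
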
#define esp_read8(REG)		esp->ops->esp_read8(esp, REG)
#define esp_write8(VAL,REG)	esp->ops->esp_write8(esp, VAL, REG)

static void esp_log_fill_regs(struct esp *esp,
			      struct esp_event_ent *p)
{
	p->sreg = esp->sreg;
	p->seqreg = esp->seqreg;
	p->sreg2 = esp->sreg2;
	p->ireg = esp->ireg;
	p->select_state = esp->select_state;
	p->event = esp->event;
}

void scsi_esp_cmd(struct esp *esp, u8 val)
{
	struct esp_event_ent *p;
	int idx = esp->esp_event_cur;

	p = &esp->esp_event_log[idx];
	p->type = ESP_EVENT_TYPE_CMD;
	p->val = val;
	esp_log_fill_regs(esp, p);

	esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);

	esp_log_command("cmd[%02x]\n", val);
	esp_write8(val, ESP_CMD);
}
EXPORT_SYMBOL(scsi_esp_cmd);
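
/*
 * scsi_esp_cmd() is the single choke point through which chip commands
 * are issued: each command is recorded, together with a snapshot of the
 * interesting registers, in esp_event_log before it is written to
 * ESP_CMD.  The log is a ring of ESP_EVENT_LOG_SZ entries (a power of
 * two), which is why the cursor wraps with "& (ESP_EVENT_LOG_SZ - 1)"
 * instead of a modulo.  esp_dump_cmd_log() below replays the ring
 * oldest-entry-first when something goes wrong.
 */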

static void esp_send_dma_cmd(struct esp *esp, int len, int max_len, int cmd)
{
	if (esp->flags & ESP_FLAG_USE_FIFO) {
		int i;

		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		for (i = 0; i < len; i++)
			esp_write8(esp->command_block[i], ESP_FDATA);
		scsi_esp_cmd(esp, cmd);
	} else {
		if (esp->rev == FASHME)
			scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		cmd |= ESP_CMD_DMA;
		esp->ops->send_dma_cmd(esp, esp->command_block_dma,
				       len, max_len, 0, cmd);
	}
}

static void esp_event(struct esp *esp, u8 val)
{
	struct esp_event_ent *p;
	int idx = esp->esp_event_cur;

	p = &esp->esp_event_log[idx];
	p->type = ESP_EVENT_TYPE_EVENT;
	p->val = val;
	esp_log_fill_regs(esp, p);

	esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);

	esp->event = val;
}

static void esp_dump_cmd_log(struct esp *esp)
{
	int idx = esp->esp_event_cur;
	int stop = idx;

	shost_printk(KERN_INFO, esp->host, "Dumping command log\n");
	do {
		struct esp_event_ent *p = &esp->esp_event_log[idx];

		shost_printk(KERN_INFO, esp->host,
			     "ent[%d] %s val[%02x] sreg[%02x] seqreg[%02x] "
			     "sreg2[%02x] ireg[%02x] ss[%02x] event[%02x]\n",
			     idx,
			     p->type == ESP_EVENT_TYPE_CMD ? "CMD" : "EVENT",
			     p->val, p->sreg, p->seqreg,
			     p->sreg2, p->ireg, p->select_state, p->event);

		idx = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
	} while (idx != stop);
}

static void esp_flush_fifo(struct esp *esp)
{
	scsi_esp_cmd(esp, ESP_CMD_FLUSH);
	if (esp->rev == ESP236) {
		int lim = 1000;

		while (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES) {
			if (--lim == 0) {
				shost_printk(KERN_ALERT, esp->host,
					     "ESP_FF_FBYTES will not clear!\n");
				break;
			}
			udelay(1);
		}
	}
}

static void hme_read_fifo(struct esp *esp)
{
	int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
	int idx = 0;

	/* The FASHME fifo is two bytes wide, so each fifo flag count
	 * accounts for a pair of bytes; ESP_STAT2_F1BYTE flags a final
	 * odd byte, which is pushed out by writing a pad byte first.
	 */
	while (fcnt--) {
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
	}
	if (esp->sreg2 & ESP_STAT2_F1BYTE) {
		esp_write8(0, ESP_FDATA);
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
	}
	esp->fifo_cnt = idx;
}

static void esp_set_all_config3(struct esp *esp, u8 val)
{
	int i;

	for (i = 0; i < ESP_MAX_TARGET; i++)
		esp->target[i].esp_config3 = val;
}

static void esp_reset_esp(struct esp *esp)
{
	u8 family_code, version;

	/* Now reset the ESP chip */
	scsi_esp_cmd(esp, ESP_CMD_RC);
	scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);
	if (esp->rev == FAST)
		esp_write8(ESP_CONFIG2_FENAB, ESP_CFG2);
	scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);

	/* This is the only point at which it is reliable to read
	 * the ID-code for a fast ESP chip variant.
	 */
	esp->max_period = ((35 * esp->ccycle) / 1000);
	if (esp->rev == FAST) {
		version = esp_read8(ESP_UID);
		family_code = (version & 0xf8) >> 3;
		if (family_code == 0x02)
			esp->rev = FAS236;
		else if (family_code == 0x0a)
			esp->rev = FASHME;
		else
			esp->rev = FAS100A;
		esp->min_period = ((4 * esp->ccycle) / 1000);
	} else {
		esp->min_period = ((5 * esp->ccycle) / 1000);
	}
	if (esp->rev == FAS236) {
		/* The AM53c974 chip returns the same ID as FAS236;
		 * writing the glitch-eater bit, which only the
		 * AM53c974 implements, tells the two apart.
		 */
		u8 config4 = ESP_CONFIG4_GE1;
		esp_write8(config4, ESP_CFG4);
		config4 = esp_read8(ESP_CFG4);
		if (config4 & ESP_CONFIG4_GE1) {
			esp->rev = PCSCSI;
			esp_write8(esp->config4, ESP_CFG4);
		}
	}
	esp->max_period = (esp->max_period + 3) >> 2;
	esp->min_period = (esp->min_period + 3) >> 2;

	esp_write8(esp->config1, ESP_CFG1);
	switch (esp->rev) {
	case ESP100:
		/* nothing to do */
		break;

	case ESP100A:
		esp_write8(esp->config2, ESP_CFG2);
		break;

	case ESP236:
		/* Slow 236 */
		esp_write8(esp->config2, ESP_CFG2);
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		break;

	case FASHME:
		esp->config2 |= (ESP_CONFIG2_HME32 | ESP_CONFIG2_HMEFENAB);
		/* fallthrough... */

	case FAS236:
	case PCSCSI:
		/* Fast 236, AM53c974 or HME */
		esp_write8(esp->config2, ESP_CFG2);
		if (esp->rev == FASHME) {
			u8 cfg3 = esp->target[0].esp_config3;

			cfg3 |= ESP_CONFIG3_FCLOCK | ESP_CONFIG3_OBPUSH;
			if (esp->scsi_id >= 8)
				cfg3 |= ESP_CONFIG3_IDBIT3;
			esp_set_all_config3(esp, cfg3);
		} else {
			u32 cfg3 = esp->target[0].esp_config3;

			cfg3 |= ESP_CONFIG3_FCLK;
			esp_set_all_config3(esp, cfg3);
		}
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		if (esp->rev == FASHME) {
			esp->radelay = 80;
		} else {
			if (esp->flags & ESP_FLAG_DIFFERENTIAL)
				esp->radelay = 0;
			else
				esp->radelay = 96;
		}
		break;

	case FAS100A:
		/* Fast 100a */
		esp_write8(esp->config2, ESP_CFG2);
		esp_set_all_config3(esp,
				    (esp->target[0].esp_config3 |
				     ESP_CONFIG3_FCLOCK));
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		esp->radelay = 32;
		break;

	default:
		break;
	}

	/* Reload the configuration registers */
	esp_write8(esp->cfact, ESP_CFACT);

	esp->prev_stp = 0;
	esp_write8(esp->prev_stp, ESP_STP);

	esp->prev_soff = 0;
	esp_write8(esp->prev_soff, ESP_SOFF);

	esp_write8(esp->neg_defp, ESP_TIMEO);

	/* Eat any bitrot in the chip */
	esp_read8(ESP_INTRPT);
	udelay(100);
}

static void esp_map_dma(struct esp *esp, struct scsi_cmnd *cmd)
{
	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
	struct scatterlist *sg = scsi_sglist(cmd);
	int total = 0, i;
	struct scatterlist *s;

	if (cmd->sc_data_direction == DMA_NONE)
		return;

	if (esp->flags & ESP_FLAG_NO_DMA_MAP) {
		/*
		 * For pseudo DMA and PIO we need the virtual address instead
		 * of a dma address, so perform an identity mapping.
		 */
		spriv->num_sg = scsi_sg_count(cmd);

		scsi_for_each_sg(cmd, s, spriv->num_sg, i) {
			s->dma_address = (uintptr_t)sg_virt(s);
			total += sg_dma_len(s);
		}
	} else {
		spriv->num_sg = scsi_dma_map(cmd);
		scsi_for_each_sg(cmd, s, spriv->num_sg, i)
			total += sg_dma_len(s);
	}
	spriv->cur_residue = sg_dma_len(sg);
	spriv->prv_sg = NULL;
	spriv->cur_sg = sg;
	spriv->tot_residue = total;
}

static dma_addr_t esp_cur_dma_addr(struct esp_cmd_entry *ent,
				   struct scsi_cmnd *cmd)
{
	struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		return ent->sense_dma +
			(ent->sense_ptr - cmd->sense_buffer);
	}

	return sg_dma_address(p->cur_sg) +
		(sg_dma_len(p->cur_sg) -
		 p->cur_residue);
}

static unsigned int esp_cur_dma_len(struct esp_cmd_entry *ent,
				    struct scsi_cmnd *cmd)
{
	struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		return SCSI_SENSE_BUFFERSIZE -
			(ent->sense_ptr - cmd->sense_buffer);
	}
	return p->cur_residue;
}

static void esp_advance_dma(struct esp *esp, struct esp_cmd_entry *ent,
			    struct scsi_cmnd *cmd, unsigned int len)
{
	struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		ent->sense_ptr += len;
		return;
	}

	p->cur_residue -= len;
	p->tot_residue -= len;
	if (p->cur_residue < 0 || p->tot_residue < 0) {
		shost_printk(KERN_ERR, esp->host,
			     "Data transfer overflow.\n");
		shost_printk(KERN_ERR, esp->host,
			     "cur_residue[%d] tot_residue[%d] len[%u]\n",
			     p->cur_residue, p->tot_residue, len);
		p->cur_residue = 0;
		p->tot_residue = 0;
	}
	if (!p->cur_residue && p->tot_residue) {
		p->prv_sg = p->cur_sg;
		p->cur_sg = sg_next(p->cur_sg);
		p->cur_residue = sg_dma_len(p->cur_sg);
	}
}

static void esp_unmap_dma(struct esp *esp, struct scsi_cmnd *cmd)
{
	if (!(esp->flags & ESP_FLAG_NO_DMA_MAP))
		scsi_dma_unmap(cmd);
}

static void esp_save_pointers(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;
	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		ent->saved_sense_ptr = ent->sense_ptr;
		return;
	}
	ent->saved_cur_residue = spriv->cur_residue;
	ent->saved_prv_sg = spriv->prv_sg;
	ent->saved_cur_sg = spriv->cur_sg;
	ent->saved_tot_residue = spriv->tot_residue;
}

static void esp_restore_pointers(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;
	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		ent->sense_ptr = ent->saved_sense_ptr;
		return;
	}
	spriv->cur_residue = ent->saved_cur_residue;
	spriv->prv_sg = ent->saved_prv_sg;
	spriv->cur_sg = ent->saved_cur_sg;
	spriv->tot_residue = ent->saved_tot_residue;
}

static void esp_write_tgt_config3(struct esp *esp, int tgt)
{
	if (esp->rev > ESP100A) {
		u8 val = esp->target[tgt].esp_config3;

		if (val != esp->prev_cfg3) {
			esp->prev_cfg3 = val;
			esp_write8(val, ESP_CFG3);
		}
	}
}

static void esp_write_tgt_sync(struct esp *esp, int tgt)
{
	u8 off = esp->target[tgt].esp_offset;
	u8 per = esp->target[tgt].esp_period;

	if (off != esp->prev_soff) {
		esp->prev_soff = off;
		esp_write8(off, ESP_SOFF);
	}
	if (per != esp->prev_stp) {
		esp->prev_stp = per;
		esp_write8(per, ESP_STP);
	}
}

static u32 esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len)
{
	if (esp->rev == FASHME) {
		/* HME has a full 24-bit transfer counter.  */
		if (dma_len > (1U << 24))
			dma_len = (1U << 24);
	} else {
		u32 base, end;

		/* ESP chip limits other variants by 16-bits of transfer
		 * count.  Actually on FAS100A and FAS236 we could get
		 * 24-bits of transfer count by enabling ESP_CONFIG2_FENAB
		 * but that is currently unsupported.
		 */
		if (dma_len > (1U << 16))
			dma_len = (1U << 16);

		/* All of the DMA variants hooked up to these chips
		 * cannot handle crossing a 24-bit address boundary.
		 */
		base = dma_addr & ((1U << 24) - 1U);
		end = base + dma_len;
		if (end > (1U << 24))
			end = (1U << 24);
		dma_len = end - base;
	}
	return dma_len;
}
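
/*
 * Worked example of the boundary clamp above (illustrative numbers):
 * with dma_addr = 0x00ffff00 and dma_len = 0x300, base is 0xffff00 and
 * end would be 0x1000200, so end is clamped to 0x1000000 and only
 * dma_len = 0x100 bytes are programmed; the remainder is picked up by
 * the next data-phase iteration after esp_advance_dma().
 */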

static int esp_need_to_nego_wide(struct esp_target_data *tp)
{
	struct scsi_target *target = tp->starget;

	return spi_width(target) != tp->nego_goal_width;
}

static int esp_need_to_nego_sync(struct esp_target_data *tp)
{
	struct scsi_target *target = tp->starget;

	/* When offset is zero, period is "don't care".  */
	if (!spi_offset(target) && !tp->nego_goal_offset)
		return 0;

	if (spi_offset(target) == tp->nego_goal_offset &&
	    spi_period(target) == tp->nego_goal_period)
		return 0;

	return 1;
}

static int esp_alloc_lun_tag(struct esp_cmd_entry *ent,
			     struct esp_lun_data *lp)
{
	if (!ent->orig_tag[0]) {
		/* Non-tagged, slot already taken?  */
		if (lp->non_tagged_cmd)
			return -EBUSY;

		if (lp->hold) {
			/* We are being held by active tagged
			 * commands.
			 */
			if (lp->num_tagged)
				return -EBUSY;

			/* Tagged commands completed, we can unplug
			 * the queue and run this untagged command.
			 */
			lp->hold = 0;
		} else if (lp->num_tagged) {
			/* Plug the queue until num_tagged decreases
			 * to zero in esp_free_lun_tag.
			 */
			lp->hold = 1;
			return -EBUSY;
		}

		lp->non_tagged_cmd = ent;
		return 0;
	}

	/* Tagged command.  Blocked while an untagged command is active.  */
	if (lp->non_tagged_cmd || lp->hold)
		return -EBUSY;

	BUG_ON(lp->tagged_cmds[ent->orig_tag[1]]);

	lp->tagged_cmds[ent->orig_tag[1]] = ent;
	lp->num_tagged++;

	return 0;
}

static void esp_free_lun_tag(struct esp_cmd_entry *ent,
			     struct esp_lun_data *lp)
{
	if (ent->orig_tag[0]) {
		BUG_ON(lp->tagged_cmds[ent->orig_tag[1]] != ent);
		lp->tagged_cmds[ent->orig_tag[1]] = NULL;
		lp->num_tagged--;
	} else {
		BUG_ON(lp->non_tagged_cmd != ent);
		lp->non_tagged_cmd = NULL;
	}
}
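
/*
 * The per-LUN queueing state above behaves like a small state machine:
 *
 *	tagged cmds active, untagged arrives -> lp->hold = 1 (plug queue)
 *	hold set, num_tagged drains to 0     -> untagged cmd may issue
 *	untagged cmd active                  -> all new cmds get -EBUSY
 *
 * so an untagged command is never outstanding on the bus at the same
 * time as tagged ones for the same LUN.
 */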

static void esp_map_sense(struct esp *esp, struct esp_cmd_entry *ent)
{
	ent->sense_ptr = ent->cmd->sense_buffer;
	if (esp->flags & ESP_FLAG_NO_DMA_MAP) {
		ent->sense_dma = (uintptr_t)ent->sense_ptr;
		return;
	}

	ent->sense_dma = dma_map_single(esp->dev, ent->sense_ptr,
					SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
}

static void esp_unmap_sense(struct esp *esp, struct esp_cmd_entry *ent)
{
	if (!(esp->flags & ESP_FLAG_NO_DMA_MAP))
		dma_unmap_single(esp->dev, ent->sense_dma,
				 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
	ent->sense_ptr = NULL;
}

/* When a contingent allegiance condition is created, we force feed a
 * REQUEST_SENSE command to the device to fetch the sense data.  Doing
 * this in the driver, rather than relying on the SCSI error handling
 * layer to send the REQUEST_SENSE automatically, keeps the sense data
 * tied to the command that caused it.
 */
static void esp_autosense(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;
	struct scsi_device *dev = cmd->device;
	int tgt, lun;
	u8 *p, val;

	tgt = dev->id;
	lun = dev->lun;

	if (!ent->sense_ptr) {
		esp_log_autosense("Doing auto-sense for tgt[%d] lun[%d]\n",
				  tgt, lun);
		esp_map_sense(esp, ent);
	}
	ent->saved_sense_ptr = ent->sense_ptr;

	esp->active_cmd = ent;

	p = esp->command_block;
	esp->msg_out_len = 0;

	*p++ = IDENTIFY(0, lun);
	*p++ = REQUEST_SENSE;
	*p++ = ((dev->scsi_level <= SCSI_2) ?
		(lun << 5) : 0);
	*p++ = 0;
	*p++ = 0;
	*p++ = SCSI_SENSE_BUFFERSIZE;
	*p++ = 0;

	esp->select_state = ESP_SELECT_BASIC;

	val = tgt;
	if (esp->rev == FASHME)
		val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT;
	esp_write8(val, ESP_BUSID);

	esp_write_tgt_sync(esp, tgt);
	esp_write_tgt_config3(esp, tgt);

	val = (p - esp->command_block);

	esp_send_dma_cmd(esp, val, 16, ESP_CMD_SELA);
}

static struct esp_cmd_entry *find_and_prep_issuable_command(struct esp *esp)
{
	struct esp_cmd_entry *ent;

	list_for_each_entry(ent, &esp->queued_cmds, list) {
		struct scsi_cmnd *cmd = ent->cmd;
		struct scsi_device *dev = cmd->device;
		struct esp_lun_data *lp = dev->hostdata;

		if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
			ent->tag[0] = 0;
			ent->tag[1] = 0;
			return ent;
		}

		if (!spi_populate_tag_msg(&ent->tag[0], cmd)) {
			ent->tag[0] = 0;
			ent->tag[1] = 0;
		}
		ent->orig_tag[0] = ent->tag[0];
		ent->orig_tag[1] = ent->tag[1];

		if (esp_alloc_lun_tag(ent, lp) < 0)
			continue;

		return ent;
	}

	return NULL;
}

static void esp_maybe_execute_command(struct esp *esp)
{
	struct esp_target_data *tp;
	struct scsi_device *dev;
	struct scsi_cmnd *cmd;
	struct esp_cmd_entry *ent;
	bool select_and_stop = false;
	int tgt, lun, i;
	u32 val, start_cmd;
	u8 *p;

	if (esp->active_cmd ||
	    (esp->flags & ESP_FLAG_RESETTING))
		return;

	ent = find_and_prep_issuable_command(esp);
	if (!ent)
		return;

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		esp_autosense(esp, ent);
		return;
	}

	cmd = ent->cmd;
	dev = cmd->device;
	tgt = dev->id;
	lun = dev->lun;
	tp = &esp->target[tgt];

	list_move(&ent->list, &esp->active_cmds);

	esp->active_cmd = ent;

	esp_map_dma(esp, cmd);
	esp_save_pointers(esp, ent);

	if (!(cmd->cmd_len == 6 || cmd->cmd_len == 10 || cmd->cmd_len == 12))
		select_and_stop = true;

	p = esp->command_block;

	esp->msg_out_len = 0;
	if (tp->flags & ESP_TGT_CHECK_NEGO) {
		/* Need to negotiate.  If the target is broken
		 * go for trivial async.
		 */
		if (tp->flags & ESP_TGT_BROKEN) {
			tp->flags &= ~ESP_TGT_DISCONNECT;
			tp->nego_goal_period = 0;
			tp->nego_goal_offset = 0;
			tp->nego_goal_width = 0;
			tp->nego_goal_tags = 0;
		}

		/* If the settings are not changing, skip this.  */
		if (spi_width(tp->starget) == tp->nego_goal_width &&
		    spi_period(tp->starget) == tp->nego_goal_period &&
		    spi_offset(tp->starget) == tp->nego_goal_offset) {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
			goto build_identify;
		}

		if (esp->rev == FASHME && esp_need_to_nego_wide(tp)) {
			esp->msg_out_len =
				spi_populate_width_msg(&esp->msg_out[0],
						       (tp->nego_goal_width ?
							1 : 0));
			tp->flags |= ESP_TGT_NEGO_WIDE;
		} else if (esp_need_to_nego_sync(tp)) {
			esp->msg_out_len =
				spi_populate_sync_msg(&esp->msg_out[0],
						      tp->nego_goal_period,
						      tp->nego_goal_offset);
			tp->flags |= ESP_TGT_NEGO_SYNC;
		} else {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
		}

		/* A queued negotiation message must go out before the
		 * command bytes, so stop after selection.
		 */
		if (esp->msg_out_len)
			select_and_stop = true;
	}

build_identify:
	*p++ = IDENTIFY(tp->flags & ESP_TGT_DISCONNECT, lun);

	if (ent->tag[0] && esp->rev == ESP100) {
		/* ESP100 lacks select w/atn3 command, use select
		 * and stop instead.
		 */
		select_and_stop = true;
	}

	if (select_and_stop) {
		esp->cmd_bytes_left = cmd->cmd_len;
		esp->cmd_bytes_ptr = &cmd->cmnd[0];

		if (ent->tag[0]) {
			for (i = esp->msg_out_len - 1;
			     i >= 0; i--)
				esp->msg_out[i + 2] = esp->msg_out[i];
			esp->msg_out[0] = ent->tag[0];
			esp->msg_out[1] = ent->tag[1];
			esp->msg_out_len += 2;
		}

		start_cmd = ESP_CMD_SELAS;
		esp->select_state = ESP_SELECT_MSGOUT;
	} else {
		start_cmd = ESP_CMD_SELA;
		if (ent->tag[0]) {
			*p++ = ent->tag[0];
			*p++ = ent->tag[1];

			start_cmd = ESP_CMD_SA3;
		}

		for (i = 0; i < cmd->cmd_len; i++)
			*p++ = cmd->cmnd[i];

		esp->select_state = ESP_SELECT_BASIC;
	}
	val = tgt;
	if (esp->rev == FASHME)
		val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT;
	esp_write8(val, ESP_BUSID);

	esp_write_tgt_sync(esp, tgt);
	esp_write_tgt_config3(esp, tgt);

	val = (p - esp->command_block);

	if (esp_debug & ESP_DEBUG_SCSICMD) {
		printk("ESP: tgt[%d] lun[%d] scsi_cmd [ ", tgt, lun);
		for (i = 0; i < cmd->cmd_len; i++)
			printk("%02x ", cmd->cmnd[i]);
		printk("]\n");
	}

	esp_send_dma_cmd(esp, val, 16, start_cmd);
}
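
/*
 * Summary of the selection strategy above: the fast path loads
 * IDENTIFY (plus optional tag bytes) and the CDB into the command
 * block and uses select-with-ATN (or select-with-ATN3 when tag bytes
 * are present) so the whole sequence runs without CPU intervention.
 * Anything unusual -- negotiation bytes queued, a non-standard CDB
 * length, or a tagged command on ESP100 -- falls back to
 * select_and_stop, where the chip halts after selection and the CDB
 * is delivered later from the ESP_EVENT_CMD_START handler.
 */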

static struct esp_cmd_entry *esp_get_ent(struct esp *esp)
{
	struct list_head *head = &esp->esp_cmd_pool;
	struct esp_cmd_entry *ret;

	if (list_empty(head)) {
		ret = kzalloc(sizeof(struct esp_cmd_entry), GFP_ATOMIC);
	} else {
		ret = list_entry(head->next, struct esp_cmd_entry, list);
		list_del(&ret->list);
		memset(ret, 0, sizeof(*ret));
	}
	return ret;
}

static void esp_put_ent(struct esp *esp, struct esp_cmd_entry *ent)
{
	list_add(&ent->list, &esp->esp_cmd_pool);
}

static void esp_cmd_is_done(struct esp *esp, struct esp_cmd_entry *ent,
			    struct scsi_cmnd *cmd, unsigned int result)
{
	struct scsi_device *dev = cmd->device;
	int tgt = dev->id;
	int lun = dev->lun;

	esp->active_cmd = NULL;
	esp_unmap_dma(esp, cmd);
	esp_free_lun_tag(ent, dev->hostdata);
	cmd->result = result;

	if (ent->eh_done) {
		complete(ent->eh_done);
		ent->eh_done = NULL;
	}

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		esp_unmap_sense(esp, ent);

		/* Restore the message/status bytes to what we actually
		 * saw originally.  Also, report that we are providing
		 * the sense data.
		 */
		cmd->result = ((DRIVER_SENSE << 24) |
			       (DID_OK << 16) |
			       (COMMAND_COMPLETE << 8) |
			       (SAM_STAT_CHECK_CONDITION << 0));

		ent->flags &= ~ESP_CMD_FLAG_AUTOSENSE;
		if (esp_debug & ESP_DEBUG_AUTOSENSE) {
			int i;

			printk("esp%d: tgt[%d] lun[%d] AUTO SENSE[ ",
			       esp->host->unique_id, tgt, lun);
			for (i = 0; i < 18; i++)
				printk("%02x ", cmd->sense_buffer[i]);
			printk("]\n");
		}
	}

	cmd->scsi_done(cmd);

	list_del(&ent->list);
	esp_put_ent(esp, ent);

	esp_maybe_execute_command(esp);
}

static unsigned int compose_result(unsigned int status, unsigned int message,
				   unsigned int driver_code)
{
	return (status | (message << 8) | (driver_code << 16));
}
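
/*
 * compose_result() packs the midlayer result word: SCSI status in byte
 * 0, message byte in byte 1, host byte (DID_*) in byte 2.  For example
 * compose_result(SAM_STAT_GOOD, COMMAND_COMPLETE, DID_OK) is 0, since
 * all three codes are zero, while a CHECK CONDITION completion would
 * put 0x02 in the low byte.
 */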

static void esp_event_queue_full(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_device *dev = ent->cmd->device;
	struct esp_lun_data *lp = dev->hostdata;

	scsi_track_queue_full(dev, lp->num_tagged - 1);
}

static int esp_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
	struct scsi_device *dev = cmd->device;
	struct esp *esp = shost_priv(dev->host);
	struct esp_cmd_priv *spriv;
	struct esp_cmd_entry *ent;

	ent = esp_get_ent(esp);
	if (!ent)
		return SCSI_MLQUEUE_HOST_BUSY;

	ent->cmd = cmd;

	cmd->scsi_done = done;

	spriv = ESP_CMD_PRIV(cmd);
	spriv->num_sg = 0;

	list_add_tail(&ent->list, &esp->queued_cmds);

	esp_maybe_execute_command(esp);

	return 0;
}

static DEF_SCSI_QCMD(esp_queuecommand)

static int esp_check_gross_error(struct esp *esp)
{
	if (esp->sreg & ESP_STAT_SPAM) {
		/* Gross Error, could be one of:
		 * - top of fifo overwritten
		 * - top of command register overwritten
		 * - DMA programmed with wrong direction
		 * - improper phase change
		 */
		shost_printk(KERN_ERR, esp->host,
			     "Gross error sreg[%02x]\n", esp->sreg);

		return 1;
	}
	return 0;
}

static int esp_check_spur_intr(struct esp *esp)
{
	switch (esp->rev) {
	case ESP100:
	case ESP100A:
		/* The interrupt pending bit of the status register cannot
		 * be trusted on these revisions.
		 */
		esp->sreg &= ~ESP_STAT_INTR;
		break;

	default:
		if (!(esp->sreg & ESP_STAT_INTR)) {
			if (esp->ireg & ESP_INTR_SR)
				return 1;

			/* If the DMA is indicating interrupt pending and the
			 * ESP is not, the only possibility is a DMA error.
			 */
			if (!esp->ops->dma_error(esp)) {
				shost_printk(KERN_ERR, esp->host,
					     "Spurious irq, sreg=%02x.\n",
					     esp->sreg);
				return -1;
			}

			shost_printk(KERN_ERR, esp->host, "DMA error\n");

			return -1;
		}
		break;
	}

	return 0;
}

static void esp_schedule_reset(struct esp *esp)
{
	esp_log_reset("esp_schedule_reset() from %ps\n",
		      __builtin_return_address(0));
	esp->flags |= ESP_FLAG_RESETTING;
	esp_event(esp, ESP_EVENT_RESET);
}

/* In order to avoid having to add a special half-reconnected state
 * into the driver we just sit here and poll through the rest of
 * the reselection process to get the tag message bytes.
 */
static struct esp_cmd_entry *esp_reconnect_with_tag(struct esp *esp,
						    struct esp_lun_data *lp)
{
	struct esp_cmd_entry *ent;
	int i;

	if (!lp->num_tagged) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect w/num_tagged==0\n");
		return NULL;
	}

	esp_log_reconnect("reconnect tag, ");

	for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) {
		if (esp->ops->irq_pending(esp))
			break;
	}
	if (i == ESP_QUICKIRQ_LIMIT) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect IRQ1 timeout\n");
		return NULL;
	}

	esp->sreg = esp_read8(ESP_STATUS);
	esp->ireg = esp_read8(ESP_INTRPT);

	esp_log_reconnect("IRQ(%d:%x:%x), ",
			  i, esp->ireg, esp->sreg);

	if (esp->ireg & ESP_INTR_DC) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, got disconnect.\n");
		return NULL;
	}

	if ((esp->sreg & ESP_STAT_PMASK) != ESP_MIP) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, not MIP sreg[%02x].\n", esp->sreg);
		return NULL;
	}

	/* DMA in the tag bytes... */
	esp->command_block[0] = 0xff;
	esp->command_block[1] = 0xff;
	esp->ops->send_dma_cmd(esp, esp->command_block_dma,
			       2, 2, 1, ESP_CMD_DMA | ESP_CMD_TI);

	/* ACK the message.  */
	scsi_esp_cmd(esp, ESP_CMD_MOK);

	for (i = 0; i < ESP_RESELECT_TAG_LIMIT; i++) {
		if (esp->ops->irq_pending(esp)) {
			esp->sreg = esp_read8(ESP_STATUS);
			esp->ireg = esp_read8(ESP_INTRPT);
			if (esp->ireg & ESP_INTR_FDONE)
				break;
		}
		udelay(1);
	}
	if (i == ESP_RESELECT_TAG_LIMIT) {
		shost_printk(KERN_ERR, esp->host, "Reconnect IRQ2 timeout\n");
		return NULL;
	}
	esp->ops->dma_drain(esp);
	esp->ops->dma_invalidate(esp);

	esp_log_reconnect("IRQ2(%d:%x:%x) tag[%x:%x]\n",
			  i, esp->ireg, esp->sreg,
			  esp->command_block[0],
			  esp->command_block[1]);

	if (esp->command_block[0] < SIMPLE_QUEUE_TAG ||
	    esp->command_block[0] > ORDERED_QUEUE_TAG) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, bad tag type %02x.\n",
			     esp->command_block[0]);
		return NULL;
	}

	ent = lp->tagged_cmds[esp->command_block[1]];
	if (!ent) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, no entry for tag %02x.\n",
			     esp->command_block[1]);
		return NULL;
	}

	return ent;
}
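
/*
 * Note on the polling above: both waits are bounded
 * (ESP_QUICKIRQ_LIMIT and ESP_RESELECT_TAG_LIMIT), so a target that
 * stalls mid-reselection cannot wedge the interrupt handler; every
 * failure path returns NULL and the caller escalates to a bus reset.
 */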

static int esp_reconnect(struct esp *esp)
{
	struct esp_cmd_entry *ent;
	struct esp_target_data *tp;
	struct esp_lun_data *lp;
	struct scsi_device *dev;
	int target, lun;

	BUG_ON(esp->active_cmd);
	if (esp->rev == FASHME) {
		/* FASHME puts the target and lun numbers directly
		 * into the fifo.
		 */
		target = esp->fifo[0];
		lun = esp->fifo[1] & 0x7;
	} else {
		u8 bits = esp_read8(ESP_FDATA);

		/* Older chips put the lun directly into the fifo, but
		 * the target is given as a sample of the arbitration
		 * lines on the bus at reselection time.  So we should
		 * see exactly two bits set: the ID of the ESP and the
		 * ID of the one reconnecting target.
		 */
		if (!(bits & esp->scsi_id_mask))
			goto do_reset;
		bits &= ~esp->scsi_id_mask;
		if (!bits || (bits & (bits - 1)))
			goto do_reset;

		target = ffs(bits) - 1;
		lun = (esp_read8(ESP_FDATA) & 0x7);

		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		if (esp->rev == ESP100) {
			u8 ireg = esp_read8(ESP_INTRPT);
			/* This chip has a bug during reselection that can
			 * cause a spurious illegal-command interrupt, which
			 * we simply ACK here.  Another possibility is a bus
			 * reset so we must check for that.
			 */
			if (ireg & ESP_INTR_SR)
				goto do_reset;
		}
		scsi_esp_cmd(esp, ESP_CMD_NULL);
	}

	esp_write_tgt_sync(esp, target);
	esp_write_tgt_config3(esp, target);

	scsi_esp_cmd(esp, ESP_CMD_MOK);

	if (esp->rev == FASHME)
		esp_write8(target | ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT,
			   ESP_BUSID);

	tp = &esp->target[target];
	dev = __scsi_device_lookup_by_target(tp->starget, lun);
	if (!dev) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, no lp tgt[%u] lun[%u]\n",
			     target, lun);
		goto do_reset;
	}
	lp = dev->hostdata;

	ent = lp->non_tagged_cmd;
	if (!ent) {
		ent = esp_reconnect_with_tag(esp, lp);
		if (!ent)
			goto do_reset;
	}

	esp->active_cmd = ent;

	esp_event(esp, ESP_EVENT_CHECK_PHASE);
	esp_restore_pointers(esp, ent);
	esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
	return 1;

do_reset:
	esp_schedule_reset(esp);
	return 0;
}

static int esp_finish_select(struct esp *esp)
{
	struct esp_cmd_entry *ent;
	struct scsi_cmnd *cmd;

	/* No longer selecting.  */
	esp->select_state = ESP_SELECT_NONE;

	esp->seqreg = esp_read8(ESP_SSTEP) & ESP_STEP_VBITS;
	ent = esp->active_cmd;
	cmd = ent->cmd;

	if (esp->ops->dma_error(esp)) {
		/* If we see a DMA error during or as a result of selection,
		 * all bets are off.
		 */
		esp_schedule_reset(esp);
		esp_cmd_is_done(esp, ent, cmd, (DID_ERROR << 16));
		return 0;
	}

	esp->ops->dma_invalidate(esp);

	if (esp->ireg == (ESP_INTR_RSEL | ESP_INTR_FDONE)) {
		struct esp_target_data *tp = &esp->target[cmd->device->id];

		/* Carefully back out of the selection attempt.  Release
		 * resources (such as DMA mapping & TAG) and reset state (such
		 * as message out and command delivery variables).
		 */
		if (!(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) {
			esp_unmap_dma(esp, cmd);
			esp_free_lun_tag(ent, cmd->device->hostdata);
			tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_NEGO_WIDE);
			esp->cmd_bytes_ptr = NULL;
			esp->cmd_bytes_left = 0;
		} else {
			esp_unmap_sense(esp, ent);
		}

		/* Now that the state is unwound properly, put the command
		 * back onto the issue queue.  It is no longer active.
		 */
		list_move(&ent->list, &esp->queued_cmds);
		esp->active_cmd = NULL;

		/* Return value ignored by caller, it directly invokes
		 * esp_reconnect().
		 */
		return 0;
	}

	if (esp->ireg == ESP_INTR_DC) {
		struct scsi_device *dev = cmd->device;

		/* Disconnect.  Make sure we re-negotiate sync and
		 * wide parameters if this target starts responding
		 * again in the future.
		 */
		esp->target[dev->id].flags |= ESP_TGT_CHECK_NEGO;

		scsi_esp_cmd(esp, ESP_CMD_ESEL);
		esp_cmd_is_done(esp, ent, cmd, (DID_BAD_TARGET << 16));
		return 1;
	}

	if (esp->ireg == (ESP_INTR_FDONE | ESP_INTR_BSERV)) {
		/* Selection successful.  On pre-FAST chips we have
		 * to do a NOP and possibly clean out the FIFO.
		 */
		if (esp->rev <= ESP236) {
			int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;

			scsi_esp_cmd(esp, ESP_CMD_NULL);

			if (!fcnt &&
			    (!esp->prev_soff ||
			     ((esp->sreg & ESP_STAT_PMASK) != ESP_DIP)))
				esp_flush_fifo(esp);
		}

		/* If we are doing a Select And Stop command, negotiation,
		 * etc. the right thing happens as we transition to the
		 * next phase.
		 */
		esp_event(esp, ESP_EVENT_CHECK_PHASE);
		return 0;
	}

	shost_printk(KERN_INFO, esp->host,
		     "Unexpected selection completion ireg[%x]\n", esp->ireg);
	esp_schedule_reset(esp);
	return 0;
}

static int esp_data_bytes_sent(struct esp *esp, struct esp_cmd_entry *ent,
			       struct scsi_cmnd *cmd)
{
	int fifo_cnt, ecount, bytes_sent, flush_fifo;

	fifo_cnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
	if (esp->prev_cfg3 & ESP_CONFIG3_EWIDE)
		fifo_cnt <<= 1;

	ecount = 0;
	if (!(esp->sreg & ESP_STAT_TCNT)) {
		ecount = ((unsigned int)esp_read8(ESP_TCLOW) |
			  (((unsigned int)esp_read8(ESP_TCMED)) << 8));
		if (esp->rev == FASHME)
			ecount |= ((unsigned int)esp_read8(FAS_RLO)) << 16;
		if (esp->rev == PCSCSI && (esp->config2 & ESP_CONFIG2_FENAB))
			ecount |= ((unsigned int)esp_read8(ESP_TCHI)) << 16;
	}

	bytes_sent = esp->data_dma_len;
	bytes_sent -= ecount;
	bytes_sent -= esp->send_cmd_residual;

	/*
	 * The am53c974 has a DMA 'peculiarity'.  The doc states:
	 * In some odd byte conditions, one residual byte will
	 * be left in the SCSI FIFO, and the FIFO Flags will
	 * never count to '0'.  When this happens, the residual
	 * byte should be retrieved via PIO following completion
	 * of the BLAST operation.
	 */
	if (fifo_cnt == 1 && ent->flags & ESP_CMD_FLAG_RESIDUAL) {
		size_t count = 1;
		size_t offset = bytes_sent;
		u8 bval = esp_read8(ESP_FDATA);

		if (ent->flags & ESP_CMD_FLAG_AUTOSENSE)
			ent->sense_ptr[bytes_sent] = bval;
		else {
			struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
			u8 *ptr;

			ptr = scsi_kmap_atomic_sg(p->cur_sg, p->num_sg,
						  &offset, &count);
			if (likely(ptr)) {
				*(ptr + offset) = bval;
				scsi_kunmap_atomic_sg(ptr);
			}
		}
		bytes_sent += fifo_cnt;
		ent->flags &= ~ESP_CMD_FLAG_RESIDUAL;
	}
	if (!(ent->flags & ESP_CMD_FLAG_WRITE))
		bytes_sent -= fifo_cnt;

	flush_fifo = 0;
	if (!esp->prev_soff) {
		/* No synchronous offset programmed: always flush the fifo. */
		flush_fifo = 1;
	} else {
		if (esp->rev == ESP100) {
			u32 fflags, phase;

			/* ESP100 has a chip bug where in the synchronous data
			 * phase it can mistake a final long REQ pulse from the
			 * target as an extra data byte.  Fun.
			 *
			 * To detect this case we resample the status register
			 * and fifo flags.  If we are still in a data phase and
			 * the fifo is anything other than empty, we assume the
			 * chip lost track and return an error so the caller
			 * resets and renegotiates.
			 */
			esp->sreg = esp_read8(ESP_STATUS);
			phase = esp->sreg & ESP_STAT_PMASK;
			fflags = esp_read8(ESP_FFLAGS);

			if ((phase == ESP_DOP &&
			     (fflags & ESP_FF_ONOTZERO)) ||
			    (phase == ESP_DIP &&
			     (fflags & ESP_FF_FBYTES)))
				return -1;
		}
		if (!(ent->flags & ESP_CMD_FLAG_WRITE))
			flush_fifo = 1;
	}

	if (flush_fifo)
		esp_flush_fifo(esp);

	return bytes_sent;
}
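
/*
 * Example of the accounting above (illustrative numbers): for a 4096
 * byte data-out that is interrupted with ecount = 512 left in the
 * transfer counter and fifo_cnt = 2 bytes still sitting in the fifo,
 * bytes_sent = 4096 - 512 - 2 = 3582.  The fifo count is subtracted
 * only for data-out, where bytes still in the fifo never made it onto
 * the bus; for data-in (ESP_CMD_FLAG_WRITE) it is left alone.
 */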

static void esp_setsync(struct esp *esp, struct esp_target_data *tp,
			u8 scsi_period, u8 scsi_offset,
			u8 esp_stp, u8 esp_soff)
{
	spi_period(tp->starget) = scsi_period;
	spi_offset(tp->starget) = scsi_offset;
	spi_width(tp->starget) = (tp->flags & ESP_TGT_WIDE) ? 1 : 0;

	if (esp_soff) {
		esp_stp &= 0x1f;
		esp_soff |= esp->radelay;
		if (esp->rev >= FAS236) {
			u8 bit = ESP_CONFIG3_FSCSI;
			if (esp->rev >= FAS100A)
				bit = ESP_CONFIG3_FAST;

			if (scsi_period < 50) {
				if (esp->rev == FASHME)
					esp_soff &= ~esp->radelay;
				tp->esp_config3 |= bit;
			} else {
				tp->esp_config3 &= ~bit;
			}
			esp->prev_cfg3 = tp->esp_config3;
			esp_write8(esp->prev_cfg3, ESP_CFG3);
		}
	}

	tp->esp_period = esp->prev_stp = esp_stp;
	tp->esp_offset = esp->prev_soff = esp_soff;

	esp_write8(esp_soff, ESP_SOFF);
	esp_write8(esp_stp, ESP_STP);

	tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO);

	spi_display_xfer_agreement(tp->starget);
}

static void esp_msgin_reject(struct esp *esp)
{
	struct esp_cmd_entry *ent = esp->active_cmd;
	struct scsi_cmnd *cmd = ent->cmd;
	struct esp_target_data *tp;
	int tgt;

	tgt = cmd->device->id;
	tp = &esp->target[tgt];

	if (tp->flags & ESP_TGT_NEGO_WIDE) {
		tp->flags &= ~(ESP_TGT_NEGO_WIDE | ESP_TGT_WIDE);

		if (!esp_need_to_nego_sync(tp)) {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
			scsi_esp_cmd(esp, ESP_CMD_RATN);
		} else {
			esp->msg_out_len =
				spi_populate_sync_msg(&esp->msg_out[0],
						      tp->nego_goal_period,
						      tp->nego_goal_offset);
			tp->flags |= ESP_TGT_NEGO_SYNC;
			scsi_esp_cmd(esp, ESP_CMD_SATN);
		}
		return;
	}

	if (tp->flags & ESP_TGT_NEGO_SYNC) {
		tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO);
		tp->esp_period = 0;
		tp->esp_offset = 0;
		esp_setsync(esp, tp, 0, 0, 0, 0);
		scsi_esp_cmd(esp, ESP_CMD_RATN);
		return;
	}

	shost_printk(KERN_INFO, esp->host, "Unexpected MESSAGE REJECT\n");
	esp_schedule_reset(esp);
}

static void esp_msgin_sdtr(struct esp *esp, struct esp_target_data *tp)
{
	u8 period = esp->msg_in[3];
	u8 offset = esp->msg_in[4];
	u8 stp;

	if (!(tp->flags & ESP_TGT_NEGO_SYNC))
		goto do_reject;

	if (offset > 15)
		goto do_reject;

	if (offset) {
		int one_clock;

		if (period > esp->max_period) {
			period = offset = 0;
			goto do_sdtr;
		}
		if (period < esp->min_period)
			goto do_reject;

		one_clock = esp->ccycle / 1000;
		stp = DIV_ROUND_UP(period << 2, one_clock);
		if (stp && esp->rev >= FAS236) {
			if (stp >= 50)
				stp--;
		}
	} else {
		stp = 0;
	}

	esp_setsync(esp, tp, period, offset, stp, offset);
	return;

do_reject:
	esp->msg_out[0] = MESSAGE_REJECT;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
	return;

do_sdtr:
	tp->nego_goal_period = period;
	tp->nego_goal_offset = offset;
	esp->msg_out_len =
		spi_populate_sync_msg(&esp->msg_out[0],
				      tp->nego_goal_period,
				      tp->nego_goal_offset);
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}
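
/*
 * Example of the STP computation above (illustrative numbers): the
 * SDTR period byte is in 4ns units, so "period << 2" converts it to
 * nanoseconds, and one_clock is the chip clock period in nanoseconds.
 * With a 25MHz clock (one_clock = 40) a requested period byte of 50
 * (200ns) gives stp = DIV_ROUND_UP(200, 40) = 5 clocks per synchronous
 * transfer step.
 */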

static void esp_msgin_wdtr(struct esp *esp, struct esp_target_data *tp)
{
	int size = 8 << esp->msg_in[3];
	u8 cfg3;

	if (esp->rev != FASHME)
		goto do_reject;

	if (size != 8 && size != 16)
		goto do_reject;

	if (!(tp->flags & ESP_TGT_NEGO_WIDE))
		goto do_reject;

	cfg3 = tp->esp_config3;
	if (size == 16) {
		tp->flags |= ESP_TGT_WIDE;
		cfg3 |= ESP_CONFIG3_EWIDE;
	} else {
		tp->flags &= ~ESP_TGT_WIDE;
		cfg3 &= ~ESP_CONFIG3_EWIDE;
	}
	tp->esp_config3 = cfg3;
	esp->prev_cfg3 = cfg3;
	esp_write8(cfg3, ESP_CFG3);

	tp->flags &= ~ESP_TGT_NEGO_WIDE;

	spi_period(tp->starget) = 0;
	spi_offset(tp->starget) = 0;
	if (!esp_need_to_nego_sync(tp)) {
		tp->flags &= ~ESP_TGT_CHECK_NEGO;
		scsi_esp_cmd(esp, ESP_CMD_RATN);
	} else {
		esp->msg_out_len =
			spi_populate_sync_msg(&esp->msg_out[0],
					      tp->nego_goal_period,
					      tp->nego_goal_offset);
		tp->flags |= ESP_TGT_NEGO_SYNC;
		scsi_esp_cmd(esp, ESP_CMD_SATN);
	}
	return;

do_reject:
	esp->msg_out[0] = MESSAGE_REJECT;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}

static void esp_msgin_extended(struct esp *esp)
{
	struct esp_cmd_entry *ent = esp->active_cmd;
	struct scsi_cmnd *cmd = ent->cmd;
	struct esp_target_data *tp;
	int tgt = cmd->device->id;

	tp = &esp->target[tgt];
	if (esp->msg_in[2] == EXTENDED_SDTR) {
		esp_msgin_sdtr(esp, tp);
		return;
	}
	if (esp->msg_in[2] == EXTENDED_WDTR) {
		esp_msgin_wdtr(esp, tp);
		return;
	}

	shost_printk(KERN_INFO, esp->host,
		     "Unexpected extended msg type %x\n", esp->msg_in[2]);

	esp->msg_out[0] = MESSAGE_REJECT;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}

/* Analyze msgin buffer.  Return zero if the message can be completely
 * processed right now, or one if more message bytes are still needed.
 */
static int esp_msgin_process(struct esp *esp)
{
	u8 msg0 = esp->msg_in[0];
	int len = esp->msg_in_len;

	if (msg0 & 0x80) {
		/* Identify */
		shost_printk(KERN_INFO, esp->host,
			     "Unexpected msgin identify\n");
		return 0;
	}

	switch (msg0) {
	case EXTENDED_MESSAGE:
		if (len == 1)
			return 1;
		if (len < esp->msg_in[1] + 2)
			return 1;
		esp_msgin_extended(esp);
		return 0;

	case IGNORE_WIDE_RESIDUE: {
		struct esp_cmd_entry *ent;
		struct esp_cmd_priv *spriv;
		if (len == 1)
			return 1;

		if (esp->msg_in[1] != 1)
			goto do_reject;

		ent = esp->active_cmd;
		spriv = ESP_CMD_PRIV(ent->cmd);

		if (spriv->cur_residue == sg_dma_len(spriv->cur_sg)) {
			spriv->cur_sg = spriv->prv_sg;
			spriv->cur_residue = 1;
		} else
			spriv->cur_residue++;
		spriv->tot_residue++;
		return 0;
	}
	case NOP:
		return 0;
	case RESTORE_POINTERS:
		esp_restore_pointers(esp, esp->active_cmd);
		return 0;
	case SAVE_POINTERS:
		esp_save_pointers(esp, esp->active_cmd);
		return 0;

	case COMMAND_COMPLETE:
	case DISCONNECT: {
		struct esp_cmd_entry *ent = esp->active_cmd;

		ent->message = msg0;
		esp_event(esp, ESP_EVENT_FREE_BUS);
		esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
		return 0;
	}
	case MESSAGE_REJECT:
		esp_msgin_reject(esp);
		return 0;

	default:
	do_reject:
		esp->msg_out[0] = MESSAGE_REJECT;
		esp->msg_out_len = 1;
		scsi_esp_cmd(esp, ESP_CMD_SATN);
		return 0;
	}
}

static int esp_process_event(struct esp *esp)
{
	int write, i;

again:
	write = 0;
	esp_log_event("process event %d phase %x\n",
		      esp->event, esp->sreg & ESP_STAT_PMASK);
	switch (esp->event) {
	case ESP_EVENT_CHECK_PHASE:
		switch (esp->sreg & ESP_STAT_PMASK) {
		case ESP_DOP:
			esp_event(esp, ESP_EVENT_DATA_OUT);
			break;
		case ESP_DIP:
			esp_event(esp, ESP_EVENT_DATA_IN);
			break;
		case ESP_STATP:
			esp_flush_fifo(esp);
			scsi_esp_cmd(esp, ESP_CMD_ICCSEQ);
			esp_event(esp, ESP_EVENT_STATUS);
			esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
			return 1;

		case ESP_MOP:
			esp_event(esp, ESP_EVENT_MSGOUT);
			break;

		case ESP_MIP:
			esp_event(esp, ESP_EVENT_MSGIN);
			break;

		case ESP_CMDP:
			esp_event(esp, ESP_EVENT_CMD_START);
			break;

		default:
			shost_printk(KERN_INFO, esp->host,
				     "Unexpected phase, sreg=%02x\n",
				     esp->sreg);
			esp_schedule_reset(esp);
			return 0;
		}
		goto again;

	case ESP_EVENT_DATA_IN:
		write = 1;
		/* fallthrough */

	case ESP_EVENT_DATA_OUT: {
		struct esp_cmd_entry *ent = esp->active_cmd;
		struct scsi_cmnd *cmd = ent->cmd;
		dma_addr_t dma_addr = esp_cur_dma_addr(ent, cmd);
		unsigned int dma_len = esp_cur_dma_len(ent, cmd);

		if (esp->rev == ESP100)
			scsi_esp_cmd(esp, ESP_CMD_NULL);

		if (write)
			ent->flags |= ESP_CMD_FLAG_WRITE;
		else
			ent->flags &= ~ESP_CMD_FLAG_WRITE;

		if (esp->ops->dma_length_limit)
			dma_len = esp->ops->dma_length_limit(esp, dma_addr,
							     dma_len);
		else
			dma_len = esp_dma_length_limit(esp, dma_addr, dma_len);

		esp->data_dma_len = dma_len;

		if (!dma_len) {
			shost_printk(KERN_ERR, esp->host,
				     "DMA length is zero!\n");
			shost_printk(KERN_ERR, esp->host,
				     "cur adr[%08llx] len[%08x]\n",
				     (unsigned long long)esp_cur_dma_addr(ent, cmd),
				     esp_cur_dma_len(ent, cmd));
			esp_schedule_reset(esp);
			return 0;
		}

		esp_log_datastart("start data addr[%08llx] len[%u] write(%d)\n",
				  (unsigned long long)dma_addr, dma_len, write);

		esp->ops->send_dma_cmd(esp, dma_addr, dma_len, dma_len,
				       write, ESP_CMD_DMA | ESP_CMD_TI);
		esp_event(esp, ESP_EVENT_DATA_DONE);
		break;
	}
	case ESP_EVENT_DATA_DONE: {
		struct esp_cmd_entry *ent = esp->active_cmd;
		struct scsi_cmnd *cmd = ent->cmd;
		int bytes_sent;

		if (esp->ops->dma_error(esp)) {
			shost_printk(KERN_INFO, esp->host,
				     "data done, DMA error, resetting\n");
			esp_schedule_reset(esp);
			return 0;
		}

		if (ent->flags & ESP_CMD_FLAG_WRITE) {
			/* Drain any bytes the DMA engine still holds. */
			esp->ops->dma_drain(esp);
		}
		esp->ops->dma_invalidate(esp);

		if (esp->ireg != ESP_INTR_BSERV) {
			/* We should always see exactly a bus-service
			 * interrupt at the end of a successful transfer.
			 */
			shost_printk(KERN_INFO, esp->host,
				     "data done, not BSERV, resetting\n");
			esp_schedule_reset(esp);
			return 0;
		}

		bytes_sent = esp_data_bytes_sent(esp, ent, cmd);

		esp_log_datadone("data done flgs[%x] sent[%d]\n",
				 ent->flags, bytes_sent);

		if (bytes_sent < 0) {
			/* Transfer accounting failed (ESP100 sync bug). */
			esp_schedule_reset(esp);
			return 0;
		}

		esp_advance_dma(esp, ent, cmd, bytes_sent);
		esp_event(esp, ESP_EVENT_CHECK_PHASE);
		goto again;
	}

	case ESP_EVENT_STATUS: {
		struct esp_cmd_entry *ent = esp->active_cmd;

		if (esp->ireg & ESP_INTR_FDONE) {
			ent->status = esp_read8(ESP_FDATA);
			ent->message = esp_read8(ESP_FDATA);
			scsi_esp_cmd(esp, ESP_CMD_MOK);
		} else if (esp->ireg == ESP_INTR_BSERV) {
			ent->status = esp_read8(ESP_FDATA);
			ent->message = 0xff;
			esp_event(esp, ESP_EVENT_MSGIN);
			return 0;
		}

		if (ent->message != COMMAND_COMPLETE) {
			shost_printk(KERN_INFO, esp->host,
				     "Unexpected message %x in status\n",
				     ent->message);
			esp_schedule_reset(esp);
			return 0;
		}

		esp_event(esp, ESP_EVENT_FREE_BUS);
		esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
		break;
	}
	case ESP_EVENT_FREE_BUS: {
		struct esp_cmd_entry *ent = esp->active_cmd;
		struct scsi_cmnd *cmd = ent->cmd;

		if (ent->message == COMMAND_COMPLETE ||
		    ent->message == DISCONNECT)
			scsi_esp_cmd(esp, ESP_CMD_ESEL);

		if (ent->message == COMMAND_COMPLETE) {
			esp_log_cmddone("Command done status[%x] message[%x]\n",
					ent->status, ent->message);
			if (ent->status == SAM_STAT_TASK_SET_FULL)
				esp_event_queue_full(esp, ent);

			if (ent->status == SAM_STAT_CHECK_CONDITION &&
			    !(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) {
				ent->flags |= ESP_CMD_FLAG_AUTOSENSE;
				esp_autosense(esp, ent);
			} else {
				esp_cmd_is_done(esp, ent, cmd,
						compose_result(ent->status,
							       ent->message,
							       DID_OK));
			}
		} else if (ent->message == DISCONNECT) {
			esp_log_disconnect("Disconnecting tgt[%d] tag[%x:%x]\n",
					   cmd->device->id,
					   ent->tag[0], ent->tag[1]);

			esp->active_cmd = NULL;
			esp_maybe_execute_command(esp);
		} else {
			shost_printk(KERN_INFO, esp->host,
				     "Unexpected message %x in freebus\n",
				     ent->message);
			esp_schedule_reset(esp);
			return 0;
		}
		if (esp->active_cmd)
			esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
		break;
	}
	case ESP_EVENT_MSGOUT: {
		scsi_esp_cmd(esp, ESP_CMD_FLUSH);

		if (esp_debug & ESP_DEBUG_MSGOUT) {
			int i;
			printk("ESP: Sending message [ ");
			for (i = 0; i < esp->msg_out_len; i++)
				printk("%02x ", esp->msg_out[i]);
			printk("]\n");
		}

		if (esp->rev == FASHME) {
			int i;

			/* Always use the fifo.  */
			for (i = 0; i < esp->msg_out_len; i++) {
				esp_write8(esp->msg_out[i], ESP_FDATA);
				esp_write8(0, ESP_FDATA);
			}
			scsi_esp_cmd(esp, ESP_CMD_TI);
		} else {
			if (esp->msg_out_len == 1) {
				esp_write8(esp->msg_out[0], ESP_FDATA);
				scsi_esp_cmd(esp, ESP_CMD_TI);
			} else if (esp->flags & ESP_FLAG_USE_FIFO) {
				for (i = 0; i < esp->msg_out_len; i++)
					esp_write8(esp->msg_out[i], ESP_FDATA);
				scsi_esp_cmd(esp, ESP_CMD_TI);
			} else {
				/* Use DMA. */
				memcpy(esp->command_block,
				       esp->msg_out,
				       esp->msg_out_len);

				esp->ops->send_dma_cmd(esp,
						       esp->command_block_dma,
						       esp->msg_out_len,
						       esp->msg_out_len,
						       0,
						       ESP_CMD_DMA|ESP_CMD_TI);
			}
		}
		esp_event(esp, ESP_EVENT_MSGOUT_DONE);
		break;
	}
	case ESP_EVENT_MSGOUT_DONE:
		if (esp->rev == FASHME) {
			scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		} else {
			if (esp->msg_out_len > 1)
				esp->ops->dma_invalidate(esp);

			/* If the chip has gone to disconnected mode, the
			 * phase state machine cannot run anyway, so only
			 * NOP while still connected.
			 */
			if (!(esp->ireg & ESP_INTR_DC))
				scsi_esp_cmd(esp, ESP_CMD_NULL);
		}

		esp->msg_out_len = 0;

		esp_event(esp, ESP_EVENT_CHECK_PHASE);
		goto again;
	case ESP_EVENT_MSGIN:
		if (esp->ireg & ESP_INTR_BSERV) {
			if (esp->rev == FASHME) {
				if (!(esp_read8(ESP_STATUS2) &
				      ESP_STAT2_FEMPTY))
					scsi_esp_cmd(esp, ESP_CMD_FLUSH);
			} else {
				scsi_esp_cmd(esp, ESP_CMD_FLUSH);
				if (esp->rev == ESP100)
					scsi_esp_cmd(esp, ESP_CMD_NULL);
			}
			scsi_esp_cmd(esp, ESP_CMD_TI);
			esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
			return 1;
		}
		if (esp->ireg & ESP_INTR_FDONE) {
			u8 val;

			if (esp->rev == FASHME)
				val = esp->fifo[0];
			else
				val = esp_read8(ESP_FDATA);
			esp->msg_in[esp->msg_in_len++] = val;

			esp_log_msgin("Got msgin byte %x\n", val);

			if (!esp_msgin_process(esp))
				esp->msg_in_len = 0;

			if (esp->rev == FASHME)
				scsi_esp_cmd(esp, ESP_CMD_FLUSH);

			scsi_esp_cmd(esp, ESP_CMD_MOK);

			/* Check whether a bus reset is to be done next */
			if (esp->event == ESP_EVENT_RESET)
				return 0;

			if (esp->event != ESP_EVENT_FREE_BUS)
				esp_event(esp, ESP_EVENT_CHECK_PHASE);
		} else {
			shost_printk(KERN_INFO, esp->host,
				     "MSGIN neither BSERV nor FDONE, resetting\n");
			esp_schedule_reset(esp);
			return 0;
		}
		break;
	case ESP_EVENT_CMD_START:
		memcpy(esp->command_block, esp->cmd_bytes_ptr,
		       esp->cmd_bytes_left);
		esp_send_dma_cmd(esp, esp->cmd_bytes_left, 16, ESP_CMD_TI);
		esp_event(esp, ESP_EVENT_CMD_DONE);
		esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
		break;
	case ESP_EVENT_CMD_DONE:
		esp->ops->dma_invalidate(esp);
		if (esp->ireg & ESP_INTR_BSERV) {
			esp_event(esp, ESP_EVENT_CHECK_PHASE);
			goto again;
		}
		esp_schedule_reset(esp);
		return 0;

	case ESP_EVENT_RESET:
		scsi_esp_cmd(esp, ESP_CMD_RS);
		break;

	default:
		shost_printk(KERN_INFO, esp->host,
			     "Unexpected event %x, resetting\n", esp->event);
		esp_schedule_reset(esp);
		return 0;
	}
	return 1;
}
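
/*
 * esp_process_event() is the heart of the driver: __esp_interrupt()
 * keeps calling it until it returns 1 ("wait for the next interrupt").
 * ESP_EVENT_CHECK_PHASE re-dispatches on the bus phase bits in sreg,
 * which is why several cases end in "goto again" -- a single interrupt
 * can carry the bus through multiple phases.
 */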

static void esp_reset_cleanup_one(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;

	esp_unmap_dma(esp, cmd);
	esp_free_lun_tag(ent, cmd->device->hostdata);
	cmd->result = DID_RESET << 16;

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE)
		esp_unmap_sense(esp, ent);

	cmd->scsi_done(cmd);
	list_del(&ent->list);
	esp_put_ent(esp, ent);
}

static void esp_clear_hold(struct scsi_device *dev, void *data)
{
	struct esp_lun_data *lp = dev->hostdata;

	BUG_ON(lp->num_tagged);
	lp->hold = 0;
}

static void esp_reset_cleanup(struct esp *esp)
{
	struct esp_cmd_entry *ent, *tmp;
	int i;

	list_for_each_entry_safe(ent, tmp, &esp->queued_cmds, list) {
		struct scsi_cmnd *cmd = ent->cmd;

		list_del(&ent->list);
		cmd->result = DID_RESET << 16;
		cmd->scsi_done(cmd);
		esp_put_ent(esp, ent);
	}

	list_for_each_entry_safe(ent, tmp, &esp->active_cmds, list) {
		if (ent == esp->active_cmd)
			esp->active_cmd = NULL;
		esp_reset_cleanup_one(esp, ent);
	}

	BUG_ON(esp->active_cmd != NULL);

	/* Force renegotiation of sync/wide transfers.  */
	for (i = 0; i < ESP_MAX_TARGET; i++) {
		struct esp_target_data *tp = &esp->target[i];

		tp->esp_period = 0;
		tp->esp_offset = 0;
		tp->esp_config3 &= ~(ESP_CONFIG3_EWIDE |
				     ESP_CONFIG3_FSCSI |
				     ESP_CONFIG3_FAST);
		tp->flags &= ~ESP_TGT_WIDE;
		tp->flags |= ESP_TGT_CHECK_NEGO;

		if (tp->starget)
			__starget_for_each_device(tp->starget, NULL,
						  esp_clear_hold);
	}
	esp->flags &= ~ESP_FLAG_RESETTING;
}

/* Runs under host->lock */
static void __esp_interrupt(struct esp *esp)
{
	int finish_reset, intr_done;
	u8 phase;

	/*
	 * Once INTRPT is read STATUS and SSTEP are cleared.
	 */
	esp->sreg = esp_read8(ESP_STATUS);
	esp->seqreg = esp_read8(ESP_SSTEP);
	esp->ireg = esp_read8(ESP_INTRPT);

	if (esp->flags & ESP_FLAG_RESETTING) {
		finish_reset = 1;
	} else {
		if (esp_check_gross_error(esp))
			return;

		finish_reset = esp_check_spur_intr(esp);
		if (finish_reset < 0)
			return;
	}

	if (esp->ireg & ESP_INTR_SR)
		finish_reset = 1;

	if (finish_reset) {
		esp_reset_cleanup(esp);
		if (esp->eh_reset) {
			complete(esp->eh_reset);
			esp->eh_reset = NULL;
		}
		return;
	}

	phase = (esp->sreg & ESP_STAT_PMASK);
	if (esp->rev == FASHME) {
		if (((phase != ESP_DIP && phase != ESP_DOP) &&
		     esp->select_state == ESP_SELECT_NONE &&
		     esp->event != ESP_EVENT_STATUS &&
		     esp->event != ESP_EVENT_DATA_DONE) ||
		    (esp->ireg & ESP_INTR_RSEL)) {
			esp->sreg2 = esp_read8(ESP_STATUS2);
			if (!(esp->sreg2 & ESP_STAT2_FEMPTY) ||
			    (esp->sreg2 & ESP_STAT2_F1BYTE))
				hme_read_fifo(esp);
		}
	}

	esp_log_intr("intr sreg[%02x] seqreg[%02x] "
		     "sreg2[%02x] ireg[%02x]\n",
		     esp->sreg, esp->seqreg, esp->sreg2, esp->ireg);

	intr_done = 0;

	if (esp->ireg & (ESP_INTR_S | ESP_INTR_SATN | ESP_INTR_IC)) {
		shost_printk(KERN_INFO, esp->host,
			     "unexpected IREG %02x\n", esp->ireg);
		if (esp->ireg & ESP_INTR_IC)
			esp_dump_cmd_log(esp);

		esp_schedule_reset(esp);
	} else {
		if (esp->ireg & ESP_INTR_RSEL) {
			if (esp->active_cmd)
				(void) esp_finish_select(esp);
			intr_done = esp_reconnect(esp);
		} else {
			/* Some combination of FDONE, BSERV, DC.  */
			if (esp->select_state != ESP_SELECT_NONE)
				intr_done = esp_finish_select(esp);
		}
	}
	while (!intr_done)
		intr_done = esp_process_event(esp);
}

irqreturn_t scsi_esp_intr(int irq, void *dev_id)
{
	struct esp *esp = dev_id;
	unsigned long flags;
	irqreturn_t ret;

	spin_lock_irqsave(esp->host->host_lock, flags);
	ret = IRQ_NONE;
	if (esp->ops->irq_pending(esp)) {
		ret = IRQ_HANDLED;
		for (;;) {
			int i;

			__esp_interrupt(esp);
			if (!(esp->flags & ESP_FLAG_QUICKIRQ_CHECK))
				break;
			esp->flags &= ~ESP_FLAG_QUICKIRQ_CHECK;

			for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) {
				if (esp->ops->irq_pending(esp))
					break;
			}
			if (i == ESP_QUICKIRQ_LIMIT)
				break;
		}
	}
	spin_unlock_irqrestore(esp->host->host_lock, flags);

	return ret;
}
EXPORT_SYMBOL(scsi_esp_intr);
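
/*
 * The loop in scsi_esp_intr() is a cheap interrupt-coalescing trick:
 * state-machine paths that expect another interrupt almost immediately
 * set ESP_FLAG_QUICKIRQ_CHECK, and the handler then polls
 * irq_pending() up to ESP_QUICKIRQ_LIMIT times before returning, often
 * saving a full interrupt round trip per SCSI phase change.
 */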

static void esp_get_revision(struct esp *esp)
{
	u8 val;

	esp->config1 = (ESP_CONFIG1_PENABLE | (esp->scsi_id & 7));
	if (esp->config2 == 0) {
		esp->config2 = (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY);
		esp_write8(esp->config2, ESP_CFG2);

		val = esp_read8(ESP_CFG2);
		val &= ~ESP_CONFIG2_MAGIC;

		esp->config2 = 0;
		if (val != (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY)) {
			/*
			 * If what we write to cfg2 does not come back,
			 * cfg2 is not implemented.
			 * Therefore this must be a plain esp100.
			 */
			esp->rev = ESP100;
			return;
		}
	}

	esp_set_all_config3(esp, 5);
	esp->prev_cfg3 = 5;
	esp_write8(esp->config2, ESP_CFG2);
	esp_write8(0, ESP_CFG3);
	esp_write8(esp->prev_cfg3, ESP_CFG3);

	val = esp_read8(ESP_CFG3);
	if (val != 5) {
		/* The cfg2 register is implemented, however
		 * cfg3 is not, must be esp100a.
		 */
		esp->rev = ESP100A;
	} else {
		esp_set_all_config3(esp, 0);
		esp->prev_cfg3 = 0;
		esp_write8(esp->prev_cfg3, ESP_CFG3);

		/* All of cfg{1,2,3} implemented, must be one of
		 * the fas variants, figure out which one.
		 */
		if (esp->cfact == 0 || esp->cfact > ESP_CCF_F5) {
			esp->rev = FAST;
			esp->sync_defp = SYNC_DEFP_FAST;
		} else {
			esp->rev = ESP236;
		}
	}
}

static void esp_init_swstate(struct esp *esp)
{
	int i;

	INIT_LIST_HEAD(&esp->queued_cmds);
	INIT_LIST_HEAD(&esp->active_cmds);
	INIT_LIST_HEAD(&esp->esp_cmd_pool);

	/* Start with a clear state, domain validation (via ->slave_configure,
	 * spi_dv_device()) will attempt to enable SYNC, WIDE, and tagged
	 * commands.
	 */
	for (i = 0 ; i < ESP_MAX_TARGET; i++) {
		esp->target[i].flags = 0;
		esp->target[i].nego_goal_period = 0;
		esp->target[i].nego_goal_offset = 0;
		esp->target[i].nego_goal_width = 0;
		esp->target[i].nego_goal_tags = 0;
	}
}

/* This places the ESP into a known state at boot time. */
static void esp_bootup_reset(struct esp *esp)
{
	u8 val;

	/* Reset the DMA */
	esp->ops->reset_dma(esp);

	/* Reset the ESP */
	esp_reset_esp(esp);

	/* Reset the SCSI bus, but tell ESP not to generate an irq */
	val = esp_read8(ESP_CFG1);
	val |= ESP_CONFIG1_SRRDISAB;
	esp_write8(val, ESP_CFG1);

	scsi_esp_cmd(esp, ESP_CMD_RS);
	udelay(400);

	esp_write8(esp->config1, ESP_CFG1);

	/* Eat any bitrot in the chip and we are done... */
	esp_read8(ESP_INTRPT);
}
2309
2310 static void esp_set_clock_params(struct esp *esp)
2311 {
2312 int fhz;
2313 u8 ccf;
2314
2315 /* This is getting messy but it has to be done correctly or else
2316  * you get weird behavior all over the place.  We are trying to
2317  * basically figure out three pieces of information.
2318  *
2319  * a) Clock Conversion Factor
2320  *
2321  *    This is a representation of the input crystal clock frequency
2322  *    going into the ESP on this machine.  Any operation whose timing
2323  *    is longer than 400ns depends on this value being correct.  For
2324  *    example, you'll get blips for arbitration/selection during high
2325  *    load or with multiple targets if this is not set correctly.
2326  *
2327  * b) Selection Time-Out
2328  *
2329  *    The ESP isn't very bright and will arbitrate for the bus and try
2330  *    to select a target forever if you let it.  This value tells the
2331  *    ESP when it has taken too long to negotiate and that it should
2332  *    interrupt the CPU so we can see what happened.  The value is
2333  *    computed as follows (from NCR/Symbios chip docs).
2334  *
2335  *          (Time Out Period) * (Input Clock)
2336  *    STO = ----------------------------------
2337  *          (8192) * (Clock Conversion Factor)
2338  *
2339  *    We use a time out period of 250ms (ESP_BUS_TIMEOUT).
2340  *
2341  * c) Empirical constants for synchronous offset and transfer period
2342  *    register values
2343  *
2344  *    This entails the smallest and largest sync period we could ever
2345  *    handle on this ESP.
2346  */
2347 fhz = esp->cfreq;
2348
2349 ccf = ((fhz / 1000000) + 4) / 5;
2350 if (ccf == 1)
2351 ccf = 2;
2352
2353 /* If we can't find anything reasonable, just assume 20MHz.
2354  * This is the clock frequency of the older sun4c's where I've
2355  * been unable to find the clock-frequency PROM property.  All
2356  * other machines provide useful values it seems.
2357  */
2358 if (fhz <= 5000000 || ccf < 1 || ccf > 8) {
2359 fhz = 20000000;
2360 ccf = 4;
2361 }
2362
2363 esp->cfact = (ccf == 8 ? 0 : ccf);
2364 esp->cfreq = fhz;
2365 esp->ccycle = ESP_HZ_TO_CYCLE(fhz);
2366 esp->ctick = ESP_TICK(ccf, esp->ccycle);
2367 esp->neg_defp = ESP_NEG_DEFP(fhz, ccf);
2368 esp->sync_defp = SYNC_DEFP_SLOW;
2369 }
2370
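The clock conversion factor computed above is just the crystal frequency rounded up to the next multiple of 5 MHz, and the hardware encodes a CCF of 8 as 0 in the register, which is what the cfact assignment handles. Worked values under that formula (these follow from the arithmetic, not from measurements):

	/* fhz = 10 MHz: ccf = (10 + 4) / 5 = 2, cfact = 2
	 * fhz = 25 MHz: ccf = (25 + 4) / 5 = 5, cfact = 5
	 * fhz = 40 MHz: ccf = (40 + 4) / 5 = 8, cfact = 0 (8 encodes as 0)
	 */
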
2371 static const char *esp_chip_names[] = {
2372 "ESP100",
2373 "ESP100A",
2374 "ESP236",
2375 "FAS236",
2376 "FAS100A",
2377 "FAST",
2378 "FASHME",
2379 "AM53C974",
2380 };
2381
2382 static struct scsi_transport_template *esp_transport_template;
2383
2384 int scsi_esp_register(struct esp *esp)
2385 {
2386 static int instance;
2387 int err;
2388
2389 if (!esp->num_tags)
2390 esp->num_tags = ESP_DEFAULT_TAGS;
2391 esp->host->transportt = esp_transport_template;
2392 esp->host->max_lun = ESP_MAX_LUN;
2393 esp->host->cmd_per_lun = 2;
2394 esp->host->unique_id = instance;
2395
2396 esp_set_clock_params(esp);
2397
2398 esp_get_revision(esp);
2399
2400 esp_init_swstate(esp);
2401
2402 esp_bootup_reset(esp);
2403
2404 dev_printk(KERN_INFO, esp->dev, "esp%u: regs[%1p:%1p] irq[%u]\n",
2405 esp->host->unique_id, esp->regs, esp->dma_regs,
2406 esp->host->irq);
2407 dev_printk(KERN_INFO, esp->dev,
2408 "esp%u: is a %s, %u MHz (ccf=%u), SCSI ID %u\n",
2409 esp->host->unique_id, esp_chip_names[esp->rev],
2410 esp->cfreq / 1000000, esp->cfact, esp->scsi_id);
2411
2412 /* Let the SCSI bus reset settle. */
2413 ssleep(esp_bus_reset_settle);
2414
2415 err = scsi_add_host(esp->host, esp->dev);
2416 if (err)
2417 return err;
2418
2419 instance++;
2420
2421 scsi_scan_host(esp->host);
2422
2423 return 0;
2424 }
2425 EXPORT_SYMBOL(scsi_esp_register);
2426
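The contract here is that a bus glue driver allocates the Scsi_Host from scsi_esp_template, fills in the chip description fields, and only then calls scsi_esp_register(), which sets clock parameters, probes the revision, resets the chip, and scans the bus. A minimal probe sketch under that contract; my_esp_ops, pdev, regs, and irq are assumptions of the example, not names from this file:

	struct Scsi_Host *host;
	struct esp *esp;
	int err;

	host = scsi_host_alloc(&scsi_esp_template, sizeof(struct esp));
	if (!host)
		return -ENOMEM;

	esp = shost_priv(host);
	esp->host = host;
	esp->dev = &pdev->dev;		/* assumed parent device */
	esp->regs = regs;		/* ioremap()ed chip registers */
	esp->ops = &my_esp_ops;		/* glue's esp_driver_ops */
	esp->cfreq = 40000000;		/* crystal frequency in Hz */
	esp->scsi_id = 7;
	esp->scsi_id_mask = 1 << esp->scsi_id;
	host->irq = irq;

	err = scsi_esp_register(esp);
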
2427 void scsi_esp_unregister(struct esp *esp)
2428 {
2429 scsi_remove_host(esp->host);
2430 }
2431 EXPORT_SYMBOL(scsi_esp_unregister);
2432
2433 static int esp_target_alloc(struct scsi_target *starget)
2434 {
2435 struct esp *esp = shost_priv(dev_to_shost(&starget->dev));
2436 struct esp_target_data *tp = &esp->target[starget->id];
2437
2438 tp->starget = starget;
2439
2440 return 0;
2441 }
2442
2443 static void esp_target_destroy(struct scsi_target *starget)
2444 {
2445 struct esp *esp = shost_priv(dev_to_shost(&starget->dev));
2446 struct esp_target_data *tp = &esp->target[starget->id];
2447
2448 tp->starget = NULL;
2449 }
2450
2451 static int esp_slave_alloc(struct scsi_device *dev)
2452 {
2453 struct esp *esp = shost_priv(dev->host);
2454 struct esp_target_data *tp = &esp->target[dev->id];
2455 struct esp_lun_data *lp;
2456
2457 lp = kzalloc(sizeof(*lp), GFP_KERNEL);
2458 if (!lp)
2459 return -ENOMEM;
2460 dev->hostdata = lp;
2461
2462 spi_min_period(tp->starget) = esp->min_period;
2463 spi_max_offset(tp->starget) = 15;
2464
2465 if (esp->flags & ESP_FLAG_WIDE_CAPABLE)
2466 spi_max_width(tp->starget) = 1;
2467 else
2468 spi_max_width(tp->starget) = 0;
2469
2470 return 0;
2471 }
2472
2473 static int esp_slave_configure(struct scsi_device *dev)
2474 {
2475 struct esp *esp = shost_priv(dev->host);
2476 struct esp_target_data *tp = &esp->target[dev->id];
2477
2478 if (dev->tagged_supported)
2479 scsi_change_queue_depth(dev, esp->num_tags);
2480
2481 tp->flags |= ESP_TGT_DISCONNECT;
2482
2483 if (!spi_initial_dv(dev->sdev_target))
2484 spi_dv_device(dev);
2485
2486 return 0;
2487 }
2488
2489 static void esp_slave_destroy(struct scsi_device *dev)
2490 {
2491 struct esp_lun_data *lp = dev->hostdata;
2492
2493 kfree(lp);
2494 dev->hostdata = NULL;
2495 }
2496
2497 static int esp_eh_abort_handler(struct scsi_cmnd *cmd)
2498 {
2499 struct esp *esp = shost_priv(cmd->device->host);
2500 struct esp_cmd_entry *ent, *tmp;
2501 struct completion eh_done;
2502 unsigned long flags;
2503
2504 /* XXX This helps a lot with debugging but might be a bit
2505  * XXX much for the final driver.
2506  */
2507 spin_lock_irqsave(esp->host->host_lock, flags);
2508 shost_printk(KERN_ERR, esp->host, "Aborting command [%p:%02x]\n",
2509 cmd, cmd->cmnd[0]);
2510 ent = esp->active_cmd;
2511 if (ent)
2512 shost_printk(KERN_ERR, esp->host,
2513 "Current command [%p:%02x]\n",
2514 ent->cmd, ent->cmd->cmnd[0]);
2515 list_for_each_entry(ent, &esp->queued_cmds, list) {
2516 shost_printk(KERN_ERR, esp->host, "Queued command [%p:%02x]\n",
2517 ent->cmd, ent->cmd->cmnd[0]);
2518 }
2519 list_for_each_entry(ent, &esp->active_cmds, list) {
2520 shost_printk(KERN_ERR, esp->host, " Active command [%p:%02x]\n",
2521 ent->cmd, ent->cmd->cmnd[0]);
2522 }
2523 esp_dump_cmd_log(esp);
2524 spin_unlock_irqrestore(esp->host->host_lock, flags);
2525
2526 spin_lock_irqsave(esp->host->host_lock, flags);
2527
2528 ent = NULL;
2529 list_for_each_entry(tmp, &esp->queued_cmds, list) {
2530 if (tmp->cmd == cmd) {
2531 ent = tmp;
2532 break;
2533 }
2534 }
2535
2536 if (ent) {
2537 /* Easiest case, we didn't even issue the command
2538  * yet so it is trivial to abort.
2539  */
2540 list_del(&ent->list);
2541
2542 cmd->result = DID_ABORT << 16;
2543 cmd->scsi_done(cmd);
2544
2545 esp_put_ent(esp, ent);
2546
2547 goto out_success;
2548 }
2549
2550 init_completion(&eh_done);
2551
2552 ent = esp->active_cmd;
2553 if (ent && ent->cmd == cmd) {
2554 /* Command is the currently active command on
2555  * the bus.  If we already have an output message
2556  * pending, no dice.
2557  */
2558 if (esp->msg_out_len)
2559 goto out_failure;
2560
2561 /* Send out an abort, encouraging the target to
2562  * go to MSGOUT phase by asserting ATN.
2563  */
2564 esp->msg_out[0] = ABORT_TASK_SET;
2565 esp->msg_out_len = 1;
2566 ent->eh_done = &eh_done;
2567
2568 scsi_esp_cmd(esp, ESP_CMD_SATN);
2569 } else {
2570 /* The command is disconnected.  This is not easy to
2571  * abort.  For now we fail and let the scsi error
2572  * handling layer go try a scsi bus reset or host
2573  * reset.
2574  *
2575  * What we could do is put together a scsi command
2576  * solely for the purpose of sending an abort message
2577  * to the target.  Coming up with all the code to
2578  * cook up scsi commands, special case them everywhere,
2579  * etc. is for questionable gain and it would be better
2580  * if the generic scsi error handling layer could do at
2581  * least some of that for us.
2582  *
2583  * Anyways this is an area for potential future improvement
2584  * in this driver.
2585  */
2586 goto out_failure;
2587 }
2588
2589 spin_unlock_irqrestore(esp->host->host_lock, flags);
2590
2591 if (!wait_for_completion_timeout(&eh_done, 5 * HZ)) {
2592 spin_lock_irqsave(esp->host->host_lock, flags);
2593 ent->eh_done = NULL;
2594 spin_unlock_irqrestore(esp->host->host_lock, flags);
2595
2596 return FAILED;
2597 }
2598
2599 return SUCCESS;
2600
2601 out_success:
2602 spin_unlock_irqrestore(esp->host->host_lock, flags);
2603 return SUCCESS;
2604
2605 out_failure:
2606 /* XXX This might be a good location to set ESP_TGT_BROKEN
2607  * XXX since we know which target/lun in particular is
2608  * XXX causing trouble.
2609  */
2610 spin_unlock_irqrestore(esp->host->host_lock, flags);
2611 return FAILED;
2612 }
2613
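The abort path above relies on a completion handshake: the handler arms ent->eh_done alongside the ABORT TASK SET message, drops the lock, and sleeps with a timeout while the interrupt-driven state machine calls complete() once the abort actually finishes on the bus. The generic shape of that pattern, with hypothetical arm/cancel helpers standing in for the driver's bookkeeping:

	DECLARE_COMPLETION_ONSTACK(done);

	arm_async_work(&done);		/* hypothetical: publish &done */
	if (!wait_for_completion_timeout(&done, 5 * HZ)) {
		cancel_async_work(&done);	/* hypothetical: timed out */
		return FAILED;
	}
	return SUCCESS;
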
2614 static int esp_eh_bus_reset_handler(struct scsi_cmnd *cmd)
2615 {
2616 struct esp *esp = shost_priv(cmd->device->host);
2617 struct completion eh_reset;
2618 unsigned long flags;
2619
2620 init_completion(&eh_reset);
2621
2622 spin_lock_irqsave(esp->host->host_lock, flags);
2623
2624 esp->eh_reset = &eh_reset;
2625
2626 /* XXX This is too simple... We should add lots of
2627  * XXX checks here: are we currently connected, are
2628  * XXX we selecting, have we disconnected commands
2629  * XXX pending, etc. etc.
2630  */
2631 esp->flags |= ESP_FLAG_RESETTING;
2632 scsi_esp_cmd(esp, ESP_CMD_RS);
2633
2634 spin_unlock_irqrestore(esp->host->host_lock, flags);
2635
2636 ssleep(esp_bus_reset_settle);
2637
2638 if (!wait_for_completion_timeout(&eh_reset, 5 * HZ)) {
2639 spin_lock_irqsave(esp->host->host_lock, flags);
2640 esp->eh_reset = NULL;
2641 spin_unlock_irqrestore(esp->host->host_lock, flags);
2642
2643 return FAILED;
2644 }
2645
2646 return SUCCESS;
2647 }
2648
2649 /* All bets are off, reset the entire device. */
2650 static int esp_eh_host_reset_handler(struct scsi_cmnd *cmd)
2651 {
2652 struct esp *esp = shost_priv(cmd->device->host);
2653 unsigned long flags;
2654
2655 spin_lock_irqsave(esp->host->host_lock, flags);
2656 esp_bootup_reset(esp);
2657 esp_reset_cleanup(esp);
2658 spin_unlock_irqrestore(esp->host->host_lock, flags);
2659
2660 ssleep(esp_bus_reset_settle);
2661
2662 return SUCCESS;
2663 }
2664
2665 static const char *esp_info(struct Scsi_Host *host)
2666 {
2667 return "esp";
2668 }
2669
2670 struct scsi_host_template scsi_esp_template = {
2671 .module = THIS_MODULE,
2672 .name = "esp",
2673 .info = esp_info,
2674 .queuecommand = esp_queuecommand,
2675 .target_alloc = esp_target_alloc,
2676 .target_destroy = esp_target_destroy,
2677 .slave_alloc = esp_slave_alloc,
2678 .slave_configure = esp_slave_configure,
2679 .slave_destroy = esp_slave_destroy,
2680 .eh_abort_handler = esp_eh_abort_handler,
2681 .eh_bus_reset_handler = esp_eh_bus_reset_handler,
2682 .eh_host_reset_handler = esp_eh_host_reset_handler,
2683 .can_queue = 7,
2684 .this_id = 7,
2685 .sg_tablesize = SG_ALL,
2686 .max_sectors = 0xffff,
2687 .skip_settle_delay = 1,
2688 };
2689 EXPORT_SYMBOL(scsi_esp_template);
2690
2691 static void esp_get_signalling(struct Scsi_Host *host)
2692 {
2693 struct esp *esp = shost_priv(host);
2694 enum spi_signal_type type;
2695
2696 if (esp->flags & ESP_FLAG_DIFFERENTIAL)
2697 type = SPI_SIGNAL_HVD;
2698 else
2699 type = SPI_SIGNAL_SE;
2700
2701 spi_signalling(host) = type;
2702 }
2703
2704 static void esp_set_offset(struct scsi_target *target, int offset)
2705 {
2706 struct Scsi_Host *host = dev_to_shost(target->dev.parent);
2707 struct esp *esp = shost_priv(host);
2708 struct esp_target_data *tp = &esp->target[target->id];
2709
2710 if (esp->flags & ESP_FLAG_DISABLE_SYNC)
2711 tp->nego_goal_offset = 0;
2712 else
2713 tp->nego_goal_offset = offset;
2714 tp->flags |= ESP_TGT_CHECK_NEGO;
2715 }
2716
2717 static void esp_set_period(struct scsi_target *target, int period)
2718 {
2719 struct Scsi_Host *host = dev_to_shost(target->dev.parent);
2720 struct esp *esp = shost_priv(host);
2721 struct esp_target_data *tp = &esp->target[target->id];
2722
2723 tp->nego_goal_period = period;
2724 tp->flags |= ESP_TGT_CHECK_NEGO;
2725 }
2726
2727 static void esp_set_width(struct scsi_target *target, int width)
2728 {
2729 struct Scsi_Host *host = dev_to_shost(target->dev.parent);
2730 struct esp *esp = shost_priv(host);
2731 struct esp_target_data *tp = &esp->target[target->id];
2732
2733 tp->nego_goal_width = (width ? 1 : 0);
2734 tp->flags |= ESP_TGT_CHECK_NEGO;
2735 }
2736
2737 static struct spi_function_template esp_transport_ops = {
2738 .set_offset = esp_set_offset,
2739 .show_offset = 1,
2740 .set_period = esp_set_period,
2741 .show_period = 1,
2742 .set_width = esp_set_width,
2743 .show_width = 1,
2744 .get_signalling = esp_get_signalling,
2745 };
2746
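These setters back the SPI transport class sysfs attributes, so user space can request new negotiation goals at runtime; nothing is renegotiated immediately, the hooks only record tp->nego_goal_* and raise ESP_TGT_CHECK_NEGO, which esp_maybe_execute_command acts on for the next command to that target. For example (standard spi_transport sysfs layout; the exact target name depends on the topology):

	/*   echo 12 > /sys/class/spi_transport/target0:0:3/period
	 *   echo 15 > /sys/class/spi_transport/target0:0:3/offset
	 */
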
2747 static int __init esp_init(void)
2748 {
2749 BUILD_BUG_ON(sizeof(struct scsi_pointer) <
2750 sizeof(struct esp_cmd_priv));
2751
2752 esp_transport_template = spi_attach_transport(&esp_transport_ops);
2753 if (!esp_transport_template)
2754 return -ENODEV;
2755
2756 return 0;
2757 }
2758
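The BUILD_BUG_ON in esp_init() is a compile-time guard: the driver stows its per-command esp_cmd_priv inside the scsi_pointer area of each scsi_cmnd, so the build must fail the moment the private struct outgrows that space. The idiom in isolation, using a hypothetical struct:

	struct demo_priv {		/* hypothetical private data */
		u32 cookie;
		void *ptr;
	};

	static int __init demo_init(void)
	{
		/* Compiles to nothing while the condition is false;
		 * breaks the build if demo_priv ever outgrows the
		 * scsi_pointer scratch area.
		 */
		BUILD_BUG_ON(sizeof(struct scsi_pointer) <
			     sizeof(struct demo_priv));
		return 0;
	}
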
2759 static void __exit esp_exit(void)
2760 {
2761 spi_release_transport(esp_transport_template);
2762 }
2763
2764 MODULE_DESCRIPTION("ESP SCSI driver core");
2765 MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
2766 MODULE_LICENSE("GPL");
2767 MODULE_VERSION(DRV_VERSION);
2768
2769 module_param(esp_bus_reset_settle, int, 0);
2770 MODULE_PARM_DESC(esp_bus_reset_settle,
2771 "ESP scsi bus reset delay in seconds");
2772
2773 module_param(esp_debug, int, 0);
2774 MODULE_PARM_DESC(esp_debug,
2775 "ESP bitmapped debugging message enable value:\n"
2776 " 0x00000001 Log interrupt events\n"
2777 " 0x00000002 Log scsi commands\n"
2778 " 0x00000004 Log resets\n"
2779 " 0x00000008 Log message in events\n"
2780 " 0x00000010 Log message out events\n"
2781 " 0x00000020 Log command completion\n"
2782 " 0x00000040 Log disconnects\n"
2783 " 0x00000080 Log data start\n"
2784 " 0x00000100 Log data done\n"
2785 " 0x00000200 Log reconnects\n"
2786 " 0x00000400 Log auto-sense data\n"
2787 );
2788
2789 module_init(esp_init);
2790 module_exit(esp_exit);
2791
2792 #ifdef CONFIG_SCSI_ESP_PIO
2793 static inline unsigned int esp_wait_for_fifo(struct esp *esp)
2794 {
2795 int i = 500000;
2796
2797 do {
2798 unsigned int fbytes = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
2799
2800 if (fbytes)
2801 return fbytes;
2802
2803 udelay(1);
2804 } while (--i);
2805
2806 shost_printk(KERN_ERR, esp->host, "FIFO is empty. sreg [%02x]\n",
2807 esp_read8(ESP_STATUS));
2808 return 0;
2809 }
2810
2811 static inline int esp_wait_for_intr(struct esp *esp)
2812 {
2813 int i = 500000;
2814
2815 do {
2816 esp->sreg = esp_read8(ESP_STATUS);
2817 if (esp->sreg & ESP_STAT_INTR)
2818 return 0;
2819
2820 udelay(1);
2821 } while (--i);
2822
2823 shost_printk(KERN_ERR, esp->host, "IRQ timeout. sreg [%02x]\n",
2824 esp->sreg);
2825 return 1;
2826 }
2827
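Both helpers above use the same bounded-poll idiom: these paths run with the host lock held in atomic context, so they cannot sleep, and instead spin in 1 us steps for at most 500000 iterations (roughly half a second) before reporting failure. The idiom in distilled form, with a hypothetical predicate callback:

	static bool esp_poll_until(struct esp *esp,
				   bool (*cond)(struct esp *))
	{
		int i = 500000;

		do {
			if (cond(esp))
				return true;
			udelay(1);
		} while (--i);

		return false;	/* ~500 ms elapsed without the condition */
	}
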
2828 #define ESP_FIFO_SIZE 16
2829
2830 void esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
2831 u32 dma_count, int write, u8 cmd)
2832 {
2833 u8 phase = esp->sreg & ESP_STAT_PMASK;
2834
2835 cmd &= ~ESP_CMD_DMA;
2836 esp->send_cmd_error = 0;
2837
2838 if (write) {
2839 u8 *dst = (u8 *)addr;
2840 u8 mask = ~(phase == ESP_MIP ? ESP_INTR_FDONE : ESP_INTR_BSERV);
2841
2842 scsi_esp_cmd(esp, cmd);
2843
2844 while (1) {
2845 if (!esp_wait_for_fifo(esp))
2846 break;
2847
2848 *dst++ = readb(esp->fifo_reg);
2849 --esp_count;
2850
2851 if (!esp_count)
2852 break;
2853
2854 if (esp_wait_for_intr(esp)) {
2855 esp->send_cmd_error = 1;
2856 break;
2857 }
2858
2859 if ((esp->sreg & ESP_STAT_PMASK) != phase)
2860 break;
2861
2862 esp->ireg = esp_read8(ESP_INTRPT);
2863 if (esp->ireg & mask) {
2864 esp->send_cmd_error = 1;
2865 break;
2866 }
2867
2868 if (phase == ESP_MIP)
2869 esp_write8(ESP_CMD_MOK, ESP_CMD);
2870
2871 esp_write8(ESP_CMD_TI, ESP_CMD);
2872 }
2873 } else {
2874 unsigned int n = ESP_FIFO_SIZE;
2875 u8 *src = (u8 *)addr;
2876
2877 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
2878
2879 if (n > esp_count)
2880 n = esp_count;
2881 writesb(esp->fifo_reg, src, n);
2882 src += n;
2883 esp_count -= n;
2884
2885 scsi_esp_cmd(esp, cmd);
2886
2887 while (esp_count) {
2888 if (esp_wait_for_intr(esp)) {
2889 esp->send_cmd_error = 1;
2890 break;
2891 }
2892
2893 if ((esp->sreg & ESP_STAT_PMASK) != phase)
2894 break;
2895
2896 esp->ireg = esp_read8(ESP_INTRPT);
2897 if (esp->ireg & ~ESP_INTR_BSERV) {
2898 esp->send_cmd_error = 1;
2899 break;
2900 }
2901
2902 n = ESP_FIFO_SIZE -
2903 (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES);
2904
2905 if (n > esp_count)
2906 n = esp_count;
2907 writesb(esp->fifo_reg, src, n);
2908 src += n;
2909 esp_count -= n;
2910
2911 esp_write8(ESP_CMD_TI, ESP_CMD);
2912 }
2913 }
2914
2915 esp->send_cmd_residual = esp_count;
2916 }
2917 EXPORT_SYMBOL(esp_send_pio_cmd);
2918 #endif
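esp_send_pio_cmd() deliberately takes the same argument list as the esp_driver_ops ->send_dma_cmd hook, so glue drivers for machines without a usable DMA engine can plug it in directly, which is why it is exported above. A hypothetical ops table doing exactly that (other callbacks omitted; requires CONFIG_SCSI_ESP_PIO):

	static const struct esp_driver_ops my_pio_esp_ops = {
		/* ... register accessors, irq_pending(), etc. ... */
		.send_dma_cmd	= esp_send_pio_cmd,	/* PIO instead of DMA */
	};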