This source file includes the following definitions:
- qla2100_intr_handler
- qla2x00_check_reg32_for_disconnect
- qla2x00_check_reg16_for_disconnect
- qla2300_intr_handler
- qla2x00_mbx_completion
- qla81xx_idc_event
- qla2x00_get_link_speed_str
- qla83xx_handle_8200_aen
- qla2x00_is_a_vp_did
- qla2x00_find_fcport_by_loopid
- qla2x00_find_fcport_by_wwpn
- qla2x00_find_fcport_by_nportid
- qla2x00_async_event
- qla2x00_process_completed_request
- qla2x00_get_sp_from_handle
- qla2x00_mbx_iocb_entry
- qla24xx_mbx_iocb_entry
- qla24xxx_nack_iocb_entry
- qla2x00_ct_entry
- qla24xx_els_ct_entry
- qla24xx_logio_entry
- qla24xx_tm_iocb_entry
- qla24xx_nvme_iocb_entry
- qla_ctrlvp_completed
- qla2x00_process_response_entry
- qla2x00_process_response_queue
- qla2x00_handle_sense
- qla2x00_handle_dif_error
- qla25xx_process_bidir_status_iocb
- qla2x00_status_entry
- qla2x00_status_cont_entry
- qla2x00_error_entry
- qla24xx_mbx_completion
- qla24xx_abort_iocb_entry
- qla24xx_nvme_ls4_iocb
- qla24xx_process_response_queue
- qla2xxx_check_risc_status
- qla24xx_intr_handler
- qla24xx_msix_rsp_q
- qla24xx_msix_default
- qla2xxx_msix_rsp_q
- qla24xx_enable_msix
- qla2x00_request_irqs
- qla2x00_free_irqs
- qla25xx_request_irq
#include "qla_def.h"
#include "qla_target.h"

#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/t10-pi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_bsg_fc.h>
#include <scsi/scsi_eh.h>
#include <scsi/fc/fc_fs.h>
#include <linux/nvme-fc-driver.h>

static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
static int qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
	sts_entry_t *);

const char *const port_state_str[] = {
	"Unknown",
	"UNCONFIGURED",
	"DEAD",
	"LOST",
	"ONLINE"
};

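/**
 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */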
irqreturn_t
qla2100_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	struct device_reg_2xxx __iomem *reg;
	int status;
	unsigned long iter;
	uint16_t hccr;
	uint16_t mb[8];
	struct rsp_que *rsp;
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x505d,
		    "%s: NULL response queue pointer.\n", __func__);
		return (IRQ_NONE);
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	for (iter = 50; iter--; ) {
		hccr = RD_REG_WORD(&reg->hccr);
		if (qla2x00_check_reg16_for_disconnect(vha, hccr))
			break;
		if (hccr & HCCR_RISC_PAUSE) {
			if (pci_channel_offline(ha->pdev))
				break;

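			/*
			 * The RISC is paused: hard-reset it, capture a
			 * firmware dump and let the DPC thread recover the
			 * adapter via ISP_ABORT_NEEDED.
			 */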
			WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
			RD_REG_WORD(&reg->hccr);

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
			break;

		if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
			WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
			RD_REG_WORD(&reg->hccr);

			/* Get mailbox data. */
			mb[0] = RD_MAILBOX_REG(ha, reg, 0);
			if (mb[0] > 0x3fff && mb[0] < 0x8000) {
				qla2x00_mbx_completion(vha, mb[0]);
				status |= MBX_INTERRUPT;
			} else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
				mb[1] = RD_MAILBOX_REG(ha, reg, 1);
				mb[2] = RD_MAILBOX_REG(ha, reg, 2);
				mb[3] = RD_MAILBOX_REG(ha, reg, 3);
				qla2x00_async_event(vha, rsp, mb);
			} else {
				ql_dbg(ql_dbg_async, vha, 0x5025,
				    "Unrecognized interrupt type (%d).\n",
				    mb[0]);
			}

			/* Release mailbox registers. */
			WRT_REG_WORD(&reg->semaphore, 0);
			RD_REG_WORD(&reg->semaphore);
		} else {
			qla2x00_process_response_queue(rsp);

			WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
			RD_REG_WORD(&reg->hccr);
		}
	}
	qla2x00_handle_mbx_completion(ha, status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (IRQ_HANDLED);
}

bool
qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *vha, uint32_t reg)
{
	/* Check for PCI disconnection. */
	if (reg == 0xffffffff && !pci_channel_offline(vha->hw->pdev)) {
		if (!test_and_set_bit(PFLG_DISCONNECTED, &vha->pci_flags) &&
		    !test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags) &&
		    !test_bit(PFLG_DRIVER_PROBING, &vha->pci_flags)) {
			/*
			 * Schedule this (only once) on the default system
			 * workqueue so that all the adapter workqueues and
			 * the DPC thread can be shutdown cleanly.
			 */
			schedule_work(&vha->hw->board_disable);
		}
		return true;
	} else
		return false;
}

bool
qla2x00_check_reg16_for_disconnect(scsi_qla_host_t *vha, uint16_t reg)
{
	return qla2x00_check_reg32_for_disconnect(vha, 0xffff0000 | reg);
}

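/**
 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */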
irqreturn_t
qla2300_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t *vha;
	struct device_reg_2xxx __iomem *reg;
	int status;
	unsigned long iter;
	uint32_t stat;
	uint16_t hccr;
	uint16_t mb[8];
	struct rsp_que *rsp;
	struct qla_hw_data *ha;
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x5058,
		    "%s: NULL response queue pointer.\n", __func__);
		return (IRQ_NONE);
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	for (iter = 50; iter--; ) {
		stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
		if (qla2x00_check_reg32_for_disconnect(vha, stat))
			break;
		if (stat & HSR_RISC_PAUSED) {
			if (unlikely(pci_channel_offline(ha->pdev)))
				break;

			hccr = RD_REG_WORD(&reg->hccr);

			if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
				ql_log(ql_log_warn, vha, 0x5026,
				    "Parity error -- HCCR=%x, Dumping "
				    "firmware.\n", hccr);
			else
				ql_log(ql_log_warn, vha, 0x5027,
				    "RISC paused -- HCCR=%x, Dumping "
				    "firmware.\n", hccr);

			/*
			 * Hard-reset the RISC, capture a firmware dump and
			 * schedule an ISP abort to recover the adapter.
			 */
			WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
			RD_REG_WORD(&reg->hccr);

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((stat & HSR_RISC_INT) == 0)
			break;

		switch (stat & 0xff) {
		case 0x1:
		case 0x2:
		case 0x10:
		case 0x11:
			qla2x00_mbx_completion(vha, MSW(stat));
			status |= MBX_INTERRUPT;

			/* Release mailbox registers. */
			WRT_REG_WORD(&reg->semaphore, 0);
			break;
		case 0x12:
			mb[0] = MSW(stat);
			mb[1] = RD_MAILBOX_REG(ha, reg, 1);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			mb[3] = RD_MAILBOX_REG(ha, reg, 3);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x13:
			qla2x00_process_response_queue(rsp);
			break;
		case 0x15:
			mb[0] = MBA_CMPLT_1_16BIT;
			mb[1] = MSW(stat);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x16:
			mb[0] = MBA_SCSI_COMPLETION;
			mb[1] = MSW(stat);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			qla2x00_async_event(vha, rsp, mb);
			break;
		default:
			ql_dbg(ql_dbg_async, vha, 0x5028,
			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
			break;
		}
		WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
		RD_REG_WORD_RELAXED(&reg->hccr);
	}
	qla2x00_handle_mbx_completion(ha, status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (IRQ_HANDLED);
}

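/**
 * qla2x00_mbx_completion() - Process mailbox command completions.
 * @vha: SCSI driver HA context
 * @mb0: Mailbox 0
 */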
272
273 static void
274 qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
275 {
276 uint16_t cnt;
277 uint32_t mboxes;
278 uint16_t __iomem *wptr;
279 struct qla_hw_data *ha = vha->hw;
280 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
281
282
283 WARN_ON_ONCE(ha->mbx_count > 32);
284 mboxes = (1ULL << ha->mbx_count) - 1;
285 if (!ha->mcp)
286 ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n");
287 else
288 mboxes = ha->mcp->in_mb;
289
290
291 ha->flags.mbox_int = 1;
292 ha->mailbox_out[0] = mb0;
293 mboxes >>= 1;
294 wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);
295
296 for (cnt = 1; cnt < ha->mbx_count; cnt++) {
297 if (IS_QLA2200(ha) && cnt == 8)
298 wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
299 if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0))
300 ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
301 else if (mboxes & BIT_0)
302 ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
303
304 wptr++;
305 mboxes >>= 1;
306 }
307 }
308
static void
qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
{
	static char *event[] =
		{ "Complete", "Request Notification", "Time Extension" };
	int rval;
	struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
	struct device_reg_82xx __iomem *reg82 = &vha->hw->iobase->isp82;
	uint16_t __iomem *wptr;
	uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];

	if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw))
		wptr = (uint16_t __iomem *)&reg24->mailbox1;
	else if (IS_QLA8044(vha->hw))
		wptr = (uint16_t __iomem *)&reg82->mailbox_out[1];
	else
		return;

	for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
		mb[cnt] = RD_REG_WORD(wptr);

	ql_dbg(ql_dbg_async, vha, 0x5021,
	    "Inter-Driver Communication %s -- "
	    "%04x %04x %04x %04x %04x %04x %04x.\n",
	    event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
	    mb[4], mb[5], mb[6]);
	switch (aen) {
	/* Handle IDC Error completion case. */
	case MBA_IDC_COMPLETE:
		if (mb[1] >> 15) {
			vha->hw->flags.idc_compl_status = 1;
			if (vha->hw->notify_dcbx_comp && !vha->vp_idx)
				complete(&vha->hw->dcbx_comp);
		}
		break;

	case MBA_IDC_NOTIFY:
		/* Acknowledgement needed? [Notify && non-zero timeout] */
		timeout = (descr >> 8) & 0xf;
		ql_dbg(ql_dbg_async, vha, 0x5022,
		    "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n",
		    vha->host_no, event[aen & 0xff], timeout);

		if (!timeout)
			return;
		rval = qla2x00_post_idc_ack_work(vha, mb);
		if (rval != QLA_SUCCESS)
			ql_log(ql_log_warn, vha, 0x5023,
			    "IDC failed to post ACK.\n");
		break;
	case MBA_IDC_TIME_EXT:
		vha->hw->idc_extend_tmo = descr;
		ql_dbg(ql_dbg_async, vha, 0x5087,
		    "%lu Inter-Driver Communication %s -- "
		    "Extend timeout by=%d.\n",
		    vha->host_no, event[aen & 0xff], vha->hw->idc_extend_tmo);
		break;
	}
}

#define LS_UNKNOWN	2
const char *
qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed)
{
	static const char *const link_speeds[] = {
		"1", "2", "?", "4", "8", "16", "32", "10"
	};
#define	QLA_LAST_SPEED (ARRAY_SIZE(link_speeds) - 1)

	if (IS_QLA2100(ha) || IS_QLA2200(ha))
		return link_speeds[0];
	else if (speed == 0x13)
		return link_speeds[QLA_LAST_SPEED];
	else if (speed < QLA_LAST_SPEED)
		return link_speeds[speed];
	else
		return link_speeds[LS_UNKNOWN];
}

static void
qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
{
	struct qla_hw_data *ha = vha->hw;

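	/*
	 * 8200 AEN interpretation (per the debug output below):
	 * mb[0] = AEN code
	 * mb[1] = AEN reason code
	 * mb[2] / mb[6] = LSW / MSW of Peg-Halt Status-1 register
	 * mb[3] / mb[7] = LSW / MSW of Peg-Halt Status-2 register
	 * mb[4] = IDC Device-State register
	 * mb[5] = IDC Driver-Presence register
	 */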
	ql_dbg(ql_dbg_async, vha, 0x506b, "AEN Code: mb[0] = 0x%x AEN reason: "
	    "mb[1] = 0x%x PH-status1: mb[2] = 0x%x PH-status1: mb[6] = 0x%x.\n",
	    mb[0], mb[1], mb[2], mb[6]);
	ql_dbg(ql_dbg_async, vha, 0x506c, "PH-status2: mb[3] = 0x%x "
	    "PH-status2: mb[7] = 0x%x Device-State: mb[4] = 0x%x "
	    "Drv-Presence: mb[5] = 0x%x.\n", mb[3], mb[7], mb[4], mb[5]);

	if (mb[1] & (IDC_PEG_HALT_STATUS_CHANGE | IDC_NIC_FW_REPORTED_FAILURE |
	    IDC_HEARTBEAT_FAILURE)) {
		ha->flags.nic_core_hung = 1;
		ql_log(ql_log_warn, vha, 0x5060,
		    "83XX: F/W Error Reported: Check if reset required.\n");

		if (mb[1] & IDC_PEG_HALT_STATUS_CHANGE) {
			uint32_t protocol_engine_id, fw_err_code, err_level;

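			/*
			 * Peg-Halt Status-1 register decode
			 * (mb[2] is the LSW, mb[6] the MSW):
			 *   bits 0-7   = protocol-engine ID
			 *   bits 8-28  = firmware error code
			 *   bits 29-31 = error level
			 */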
			protocol_engine_id = (mb[2] & 0xff);
			fw_err_code = (((mb[2] & 0xff00) >> 8) |
			    ((mb[6] & 0x1fff) << 8));
			err_level = ((mb[6] & 0xe000) >> 13);
			ql_log(ql_log_warn, vha, 0x5061, "PegHalt Status-1 "
			    "Register: protocol_engine_id=0x%x "
			    "fw_err_code=0x%x err_level=0x%x.\n",
			    protocol_engine_id, fw_err_code, err_level);
			ql_log(ql_log_warn, vha, 0x5062, "PegHalt Status-2 "
			    "Register: 0x%x%x.\n", mb[7], mb[3]);
			if (err_level == ERR_LEVEL_NON_FATAL) {
				ql_log(ql_log_warn, vha, 0x5063,
				    "Not a fatal error, f/w has recovered itself.\n");
			} else if (err_level == ERR_LEVEL_RECOVERABLE_FATAL) {
				ql_log(ql_log_fatal, vha, 0x5064,
				    "Recoverable Fatal error: Chip reset "
				    "required.\n");
				qla83xx_schedule_work(vha,
				    QLA83XX_NIC_CORE_RESET);
			} else if (err_level == ERR_LEVEL_UNRECOVERABLE_FATAL) {
				ql_log(ql_log_fatal, vha, 0x5065,
				    "Unrecoverable Fatal error: Set FAILED "
				    "state, reboot required.\n");
				qla83xx_schedule_work(vha,
				    QLA83XX_NIC_CORE_UNRECOVERABLE);
			}
		}

		if (mb[1] & IDC_NIC_FW_REPORTED_FAILURE) {
			uint16_t peg_fw_state, nw_interface_link_up;
			uint16_t nw_interface_signal_detect, sfp_status;
			uint16_t htbt_counter, htbt_monitor_enable;
			uint16_t sfp_additional_info, sfp_multirate;
			uint16_t sfp_tx_fault, link_speed, dcbx_status;

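			/*
			 * Peg-to-FC Status register decode
			 * (mb[2] is the LSW, mb[6] the MSW):
			 *   mb[2] bits 0-7   = Peg-firmware state
			 *   mb[2] bit  8     = network-interface link up
			 *   mb[2] bit  9     = network-interface signal detect
			 *   mb[2] bits 10-11 = SFP status
			 *   mb[2] bits 12-14 = heartbeat counter
			 *   mb[2] bit  15    = heartbeat monitor enable
			 *   mb[6] bits 0-1   = SFP additional info
			 *   mb[6] bit  2     = SFP multirate
			 *   mb[6] bit  3     = SFP TX fault
			 *   mb[6] bits 4-6   = link speed
			 *   mb[6] bits 12-14 = DCBX status
			 */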
			peg_fw_state = (mb[2] & 0x00ff);
			nw_interface_link_up = ((mb[2] & 0x0100) >> 8);
			nw_interface_signal_detect = ((mb[2] & 0x0200) >> 9);
			sfp_status = ((mb[2] & 0x0c00) >> 10);
			htbt_counter = ((mb[2] & 0x7000) >> 12);
			htbt_monitor_enable = ((mb[2] & 0x8000) >> 15);
			sfp_additional_info = (mb[6] & 0x0003);
			sfp_multirate = ((mb[6] & 0x0004) >> 2);
			sfp_tx_fault = ((mb[6] & 0x0008) >> 3);
			link_speed = ((mb[6] & 0x0070) >> 4);
			dcbx_status = ((mb[6] & 0x7000) >> 12);

			ql_log(ql_log_warn, vha, 0x5066,
			    "Peg-to-Fc Status Register:\n"
			    "peg_fw_state=0x%x, nw_interface_link_up=0x%x, "
			    "nw_interface_signal_detect=0x%x"
			    "\nsfp_status=0x%x.\n", peg_fw_state,
			    nw_interface_link_up, nw_interface_signal_detect,
			    sfp_status);
			ql_log(ql_log_warn, vha, 0x5067,
			    "htbt_counter=0x%x, htbt_monitor_enable=0x%x, "
			    "sfp_additional_info=0x%x, sfp_multirate=0x%x.\n",
			    htbt_counter, htbt_monitor_enable,
			    sfp_additional_info, sfp_multirate);
			ql_log(ql_log_warn, vha, 0x5068,
			    "sfp_tx_fault=0x%x, link_speed=0x%x, "
			    "dcbx_status=0x%x.\n", sfp_tx_fault, link_speed,
			    dcbx_status);

			qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
		}

		if (mb[1] & IDC_HEARTBEAT_FAILURE) {
			ql_log(ql_log_warn, vha, 0x5069,
			    "Heartbeat Failure encountered, chip reset "
			    "required.\n");

			qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
		}
	}

	if (mb[1] & IDC_DEVICE_STATE_CHANGE) {
		ql_log(ql_log_info, vha, 0x506a,
		    "IDC Device-State changed = 0x%x.\n", mb[4]);
		if (ha->flags.nic_core_reset_owner)
			return;
		qla83xx_schedule_work(vha, MBA_IDC_AEN);
	}
}

int
qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry)
{
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *vp;
	uint32_t vp_did;
	unsigned long flags;
	int ret = 0;

	if (!ha->num_vhosts)
		return ret;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vp, &ha->vp_list, list) {
		vp_did = vp->d_id.b24;
		if (vp_did == rscn_entry) {
			ret = 1;
			break;
		}
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	return ret;
}

fc_port_t *
qla2x00_find_fcport_by_loopid(scsi_qla_host_t *vha, uint16_t loop_id)
{
	fc_port_t *f, *tf;

	f = tf = NULL;
	list_for_each_entry_safe(f, tf, &vha->vp_fcports, list)
		if (f->loop_id == loop_id)
			return f;
	return NULL;
}

fc_port_t *
qla2x00_find_fcport_by_wwpn(scsi_qla_host_t *vha, u8 *wwpn, u8 incl_deleted)
{
	fc_port_t *f, *tf;

	f = tf = NULL;
	list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
		if (memcmp(f->port_name, wwpn, WWN_SIZE) == 0) {
			if (incl_deleted)
				return f;
			else if (f->deleted == 0)
				return f;
		}
	}
	return NULL;
}

fc_port_t *
qla2x00_find_fcport_by_nportid(scsi_qla_host_t *vha, port_id_t *id,
	u8 incl_deleted)
{
	fc_port_t *f, *tf;

	f = tf = NULL;
	list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
		if (f->d_id.b24 == id->b24) {
			if (incl_deleted)
				return f;
			else if (f->deleted == 0)
				return f;
		}
	}
	return NULL;
}

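/**
 * qla2x00_async_event() - Process asynchronous events.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 * @mb: Mailbox registers (0 - 3)
 */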
void
qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
{
	uint16_t handle_cnt;
	uint16_t cnt, mbx;
	uint32_t handles[5];
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
	struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
	uint32_t rscn_entry, host_pid;
	unsigned long flags;
	fc_port_t *fcport = NULL;

	if (!vha->hw->flags.fw_started)
		return;

	/* Setup to process RIO completion. */
	handle_cnt = 0;
	if (IS_CNA_CAPABLE(ha))
		goto skip_rio;
	switch (mb[0]) {
	case MBA_SCSI_COMPLETION:
		handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
		handle_cnt = 1;
		break;
	case MBA_CMPLT_1_16BIT:
		handles[0] = mb[1];
		handle_cnt = 1;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_2_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handle_cnt = 2;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_3_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handle_cnt = 3;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_4_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
		handle_cnt = 4;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_5_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
		handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
		handle_cnt = 5;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_2_32BIT:
		handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
		handles[1] = le32_to_cpu(
		    ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
		    RD_MAILBOX_REG(ha, reg, 6));
		handle_cnt = 2;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	default:
		break;
	}
skip_rio:
	switch (mb[0]) {
	case MBA_SCSI_COMPLETION:	/* Fast Post */
		if (!vha->flags.online)
			break;

		for (cnt = 0; cnt < handle_cnt; cnt++)
			qla2x00_process_completed_request(vha, rsp->req,
			    handles[cnt]);
		break;

	case MBA_RESET:			/* Reset */
		ql_dbg(ql_dbg_async, vha, 0x5002,
		    "Asynchronous RESET.\n");

		set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
		break;

	case MBA_SYSTEM_ERR:		/* System Error */
		mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
		    IS_QLA28XX(ha)) ?
			RD_REG_WORD(&reg24->mailbox7) : 0;
		ql_log(ql_log_warn, vha, 0x5003,
		    "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
		    "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);
		ha->fw_dump_mpi =
		    (IS_QLA27XX(ha) || IS_QLA28XX(ha)) &&
		    RD_REG_WORD(&reg24->mailbox7) & BIT_8;
		ha->isp_ops->fw_dump(vha, 1);
		ha->flags.fw_init_done = 0;
		QLA_FW_STOPPED(ha);

		if (IS_FWI2_CAPABLE(ha)) {
			if (mb[1] == 0 && mb[2] == 0) {
				ql_log(ql_log_fatal, vha, 0x5004,
				    "Unrecoverable Hardware Error: adapter "
				    "marked OFFLINE!\n");
				vha->flags.online = 0;
				vha->device_flags |= DFLG_DEV_FAILED;
			} else {
				/* Check to see if MPI timeout occurred. */
				if ((mbx & MBX_3) && (ha->port_no == 0))
					set_bit(MPI_RESET_NEEDED,
					    &vha->dpc_flags);

				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			}
		} else if (mb[1] == 0) {
			ql_log(ql_log_fatal, vha, 0x5005,
			    "Unrecoverable Hardware Error: adapter marked "
			    "OFFLINE!\n");
			vha->flags.online = 0;
			vha->device_flags |= DFLG_DEV_FAILED;
		} else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_REQ_TRANSFER_ERR:	/* Request Transfer Error */
		ql_log(ql_log_warn, vha, 0x5006,
		    "ISP Request Transfer Error (%x).\n", mb[1]);

		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_RSP_TRANSFER_ERR:	/* Response Transfer Error */
		ql_log(ql_log_warn, vha, 0x5007,
		    "ISP Response Transfer Error (%x).\n", mb[1]);

		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_WAKEUP_THRES:		/* Request Queue Wake-up */
		ql_dbg(ql_dbg_async, vha, 0x5008,
		    "Asynchronous WAKEUP_THRES (%x).\n", mb[1]);
		break;

	case MBA_LOOP_INIT_ERR:
		ql_log(ql_log_warn, vha, 0x5090,
		    "LOOP INIT ERROR (%x).\n", mb[1]);
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_LIP_OCCURRED:		/* Loop Initialization Procedure */
		ha->flags.lip_ae = 1;

		ql_dbg(ql_dbg_async, vha, 0x5009,
		    "LIP occurred (%x).\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
		set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);

		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
		break;

	case MBA_LOOP_UP:		/* Loop Up Event */
		if (IS_QLA2100(ha) || IS_QLA2200(ha))
			ha->link_data_rate = PORT_SPEED_1GB;
		else
			ha->link_data_rate = mb[1];

		ql_log(ql_log_info, vha, 0x500a,
		    "LOOP UP detected (%s Gbps).\n",
		    qla2x00_get_link_speed_str(ha, ha->link_data_rate));

		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);

		if (AUTO_DETECT_SFP_SUPPORT(vha)) {
			set_bit(DETECT_SFP_CHANGE, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
		}
		break;

	case MBA_LOOP_DOWN:		/* Loop Down Event */
		SAVE_TOPO(ha);
		ha->flags.lip_ae = 0;
		ha->current_topology = 0;

		mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
			? RD_REG_WORD(&reg24->mailbox4) : 0;
		mbx = (IS_P3P_TYPE(ha)) ? RD_REG_WORD(&reg82->mailbox_out[4])
			: mbx;
		ql_log(ql_log_info, vha, 0x500b,
		    "LOOP DOWN detected (%x %x %x %x).\n",
		    mb[1], mb[2], mb[3], mbx);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			/*
			 * In case of loop down, restore WWPN from
			 * NVRAM in case of FA-WWPN capable ISP.
			 * Restore for Physical Port only.
			 */
			if (!vha->vp_idx) {
				if (ha->flags.fawwpn_enabled &&
				    (ha->current_topology == ISP_CFG_F)) {
					void *wwpn = ha->init_cb->port_name;

					memcpy(vha->port_name, wwpn, WWN_SIZE);
					fc_host_port_name(vha->host) =
					    wwn_to_u64(vha->port_name);
					ql_dbg(ql_dbg_init + ql_dbg_verbose,
					    vha, 0x00d8, "LOOP DOWN detected,"
					    "restore WWPN %016llx\n",
					    wwn_to_u64(vha->port_name));
				}

				clear_bit(VP_CONFIG_OK, &vha->vp_flags);
			}

			vha->device_flags |= DFLG_NO_CABLE;
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		vha->flags.management_server_logged_in = 0;
		ha->link_data_rate = PORT_SPEED_UNKNOWN;
		qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
		break;

	case MBA_LIP_RESET:		/* LIP reset occurred */
		ql_dbg(ql_dbg_async, vha, 0x500c,
		    "LIP reset occurred (%x).\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

		ha->operating_mode = LOOP;
		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
		break;

	case MBA_POINT_TO_POINT:	/* Point-to-Point */
		ha->flags.lip_ae = 0;

		if (IS_QLA2100(ha))
			break;

		if (IS_CNA_CAPABLE(ha)) {
			ql_dbg(ql_dbg_async, vha, 0x500d,
			    "DCBX Completed -- %04x %04x %04x.\n",
			    mb[1], mb[2], mb[3]);
			if (ha->notify_dcbx_comp && !vha->vp_idx)
				complete(&ha->dcbx_comp);

		} else
			ql_dbg(ql_dbg_async, vha, 0x500e,
			    "Asynchronous P2P MODE received.\n");

		/*
		 * Until there's a transition from loop down to loop up, treat
		 * this as loop down only.
		 */
		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			if (!atomic_read(&vha->loop_down_timer))
				atomic_set(&vha->loop_down_timer,
				    LOOP_DOWN_TIME);
			if (!N2N_TOPO(ha))
				qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
			set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

		set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
		set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);

		vha->flags.management_server_logged_in = 0;
		break;

	case MBA_CHG_IN_CONNECTION:	/* Change in connection mode */
		if (IS_QLA2100(ha))
			break;

		ql_dbg(ql_dbg_async, vha, 0x500f,
		    "Configuration change detected: value=%x.\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			if (!atomic_read(&vha->loop_down_timer))
				atomic_set(&vha->loop_down_timer,
				    LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		break;

	case MBA_PORT_UPDATE:		/* Port database update */
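		/*
		 * Port database update. Relevant inputs:
		 * mb[1] = N_Port handle of the changed port,
		 *         or 0xffff for a global event
		 * mb[2] = new login state (0x7 = port logged out)
		 * mb[3] = LSB is vp_idx, 0xff = all vports
		 *
		 * Skip the event when it is not meant for this vport.
		 */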
		if (IS_QLA2XXX_MIDTYPE(ha) &&
		    ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) ||
		    (mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff))
			break;

		if (mb[2] == 0x7) {
			ql_dbg(ql_dbg_async, vha, 0x5010,
			    "Port %s %04x %04x %04x.\n",
			    mb[1] == 0xffff ? "unavailable" : "logout",
			    mb[1], mb[2], mb[3]);

			if (mb[1] == 0xffff)
				goto global_port_update;

			if (mb[1] == NPH_SNS_LID(ha)) {
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
				set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
				break;
			}

			/* use handle_cnt for loop id/nport handle */
			if (IS_FWI2_CAPABLE(ha))
				handle_cnt = NPH_SNS;
			else
				handle_cnt = SIMPLE_NAME_SERVER;
			if (mb[1] == handle_cnt) {
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
				set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
				break;
			}

			/* Port logout */
			fcport = qla2x00_find_fcport_by_loopid(vha, mb[1]);
			if (!fcport)
				break;
			if (atomic_read(&fcport->state) != FCS_ONLINE)
				break;
			ql_dbg(ql_dbg_async, vha, 0x508a,
			    "Marking port lost loopid=%04x portid=%06x.\n",
			    fcport->loop_id, fcport->d_id.b24);
			if (qla_ini_mode_enabled(vha)) {
				qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
				fcport->logout_on_delete = 0;
				qlt_schedule_sess_for_deletion(fcport);
			}
			break;

global_port_update:
			if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
				atomic_set(&vha->loop_state, LOOP_DOWN);
				atomic_set(&vha->loop_down_timer,
				    LOOP_DOWN_TIME);
				vha->device_flags |= DFLG_NO_CABLE;
				qla2x00_mark_all_devices_lost(vha, 1);
			}

			if (vha->vp_idx) {
				atomic_set(&vha->vp_state, VP_FAILED);
				fc_vport_set_state(vha->fc_vport,
				    FC_VPORT_FAILED);
				qla2x00_mark_all_devices_lost(vha, 1);
			}

			vha->flags.management_server_logged_in = 0;
			ha->link_data_rate = PORT_SPEED_UNKNOWN;
			break;
		}

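		/*
		 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
		 * event etc. earlier indicating loop is down) then process
		 * it. Otherwise ignore it and wait for an RSCN to come in.
		 */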
		atomic_set(&vha->loop_down_timer, 0);
		if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
		    !ha->flags.n2n_ae &&
		    atomic_read(&vha->loop_state) != LOOP_DEAD) {
			ql_dbg(ql_dbg_async, vha, 0x5011,
			    "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
			    mb[1], mb[2], mb[3]);
			break;
		}

		ql_dbg(ql_dbg_async, vha, 0x5012,
		    "Port database changed %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);

		/* Mark the loop as up and kick off a full resync/relogin. */
		atomic_set(&vha->loop_state, LOOP_UP);
		vha->scan.scan_retry = 0;

		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		set_bit(VP_CONFIG_OK, &vha->vp_flags);
		break;

	case MBA_RSCN_UPDATE:		/* State Change Registration */
		/* Check if the Vport has issued a SCR */
		if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
			break;

		if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
			break;

		ql_dbg(ql_dbg_async, vha, 0x5013,
		    "RSCN database changed -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);

		rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
		host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
			| vha->d_id.b.al_pa;
		if (rscn_entry == host_pid) {
			ql_dbg(ql_dbg_async, vha, 0x5014,
			    "Ignoring RSCN update to local host "
			    "port ID (%06x).\n", host_pid);
			break;
		}

		/* Ignore reserved bits from RSCN-payload. */
		rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];

		/* Skip RSCNs for virtual ports on the same physical adapter */
		if (qla2x00_is_a_vp_did(vha, rscn_entry))
			break;

		atomic_set(&vha->loop_down_timer, 0);
		vha->flags.management_server_logged_in = 0;
		{
			struct event_arg ea;

			memset(&ea, 0, sizeof(ea));
			ea.id.b24 = rscn_entry;
			ea.id.b.rsvd_1 = rscn_entry >> 24;
			qla2x00_handle_rscn(vha, &ea);
			qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
		}
		break;

	case MBA_ZIO_RESPONSE:
		ql_dbg(ql_dbg_async, vha, 0x5015,
		    "[R|Z]IO update completion.\n");

		if (IS_FWI2_CAPABLE(ha))
			qla24xx_process_response_queue(vha, rsp);
		else
			qla2x00_process_response_queue(rsp);
		break;

	case MBA_DISCARD_RND_FRAME:
		ql_dbg(ql_dbg_async, vha, 0x5016,
		    "Discard RND Frame -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;

	case MBA_TRACE_NOTIFICATION:
		ql_dbg(ql_dbg_async, vha, 0x5017,
		    "Trace Notification -- %04x %04x.\n", mb[1], mb[2]);
		break;

	case MBA_ISP84XX_ALERT:
		ql_dbg(ql_dbg_async, vha, 0x5018,
		    "ISP84XX Alert Notification -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);

		spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
		switch (mb[1]) {
		case A84_PANIC_RECOVERY:
			ql_log(ql_log_info, vha, 0x5019,
			    "Alert 84XX: panic recovery %04x %04x.\n",
			    mb[2], mb[3]);
			break;
		case A84_OP_LOGIN_COMPLETE:
			ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
			ql_log(ql_log_info, vha, 0x501a,
			    "Alert 84XX: firmware version %x.\n",
			    ha->cs84xx->op_fw_version);
			break;
		case A84_DIAG_LOGIN_COMPLETE:
			ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
			ql_log(ql_log_info, vha, 0x501b,
			    "Alert 84XX: diagnostic firmware version %x.\n",
			    ha->cs84xx->diag_fw_version);
			break;
		case A84_GOLD_LOGIN_COMPLETE:
			ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
			ha->cs84xx->fw_update = 1;
			ql_log(ql_log_info, vha, 0x501c,
			    "Alert 84XX: gold firmware version %x.\n",
			    ha->cs84xx->gold_fw_version);
			break;
		default:
			ql_log(ql_log_warn, vha, 0x501d,
			    "Alert 84xx: Invalid Alert %04x %04x %04x.\n",
			    mb[1], mb[2], mb[3]);
		}
		spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
		break;
	case MBA_DCBX_START:
		ql_dbg(ql_dbg_async, vha, 0x501e,
		    "DCBX Started -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;
	case MBA_DCBX_PARAM_UPDATE:
		ql_dbg(ql_dbg_async, vha, 0x501f,
		    "DCBX Parameters Updated -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;
	case MBA_FCF_CONF_ERR:
		ql_dbg(ql_dbg_async, vha, 0x5020,
		    "FCF Configuration Error -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;
	case MBA_IDC_NOTIFY:
		if (IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
			mb[4] = RD_REG_WORD(&reg24->mailbox4);
			if (((mb[2] & 0x7fff) == MBC_PORT_RESET ||
			    (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) &&
			    (mb[4] & INTERNAL_LOOPBACK_MASK) != 0) {
				set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
				/*
				 * Extend loop down timer since port is active.
				 */
				if (atomic_read(&vha->loop_state) == LOOP_DOWN)
					atomic_set(&vha->loop_down_timer,
					    LOOP_DOWN_TIME);
				qla2xxx_wake_dpc(vha);
			}
		}
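		/* fall through */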
	case MBA_IDC_COMPLETE:
		if (ha->notify_lb_portup_comp && !vha->vp_idx)
			complete(&ha->lb_portup_comp);
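		/* fall through */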
	case MBA_IDC_TIME_EXT:
		if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) ||
		    IS_QLA8044(ha))
			qla81xx_idc_event(vha, mb[0], mb[1]);
		break;

	case MBA_IDC_AEN:
		mb[4] = RD_REG_WORD(&reg24->mailbox4);
		mb[5] = RD_REG_WORD(&reg24->mailbox5);
		mb[6] = RD_REG_WORD(&reg24->mailbox6);
		mb[7] = RD_REG_WORD(&reg24->mailbox7);
		qla83xx_handle_8200_aen(vha, mb);
		break;

	case MBA_DPORT_DIAGNOSTICS:
		ql_dbg(ql_dbg_async, vha, 0x5052,
		    "D-Port Diagnostics: %04x result=%s\n",
		    mb[0],
		    mb[1] == 0 ? "start" :
		    mb[1] == 1 ? "done (pass)" :
		    mb[1] == 2 ? "done (error)" : "other");
		break;

	case MBA_TEMPERATURE_ALERT:
		ql_dbg(ql_dbg_async, vha, 0x505e,
		    "TEMPERATURE ALERT: %04x %04x %04x\n", mb[1], mb[2], mb[3]);
		if (mb[1] == 0x12)
			schedule_work(&ha->board_disable);
		break;

	case MBA_TRANS_INSERT:
		ql_dbg(ql_dbg_async, vha, 0x5091,
		    "Transceiver Insertion: %04x\n", mb[1]);
		break;

	default:
		ql_dbg(ql_dbg_async, vha, 0x5057,
		    "Unknown AEN:%04x %04x %04x %04x\n",
		    mb[0], mb[1], mb[2], mb[3]);
	}

	qlt_async_event(mb[0], vha, mb);

	if (!vha->vp_idx && ha->num_vhosts)
		qla2x00_alert_all_vps(rsp, mb);
}

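/**
 * qla2x00_process_completed_request() - Process a Fast Post response.
 * @vha: SCSI driver HA context
 * @req: request queue
 * @index: SRB index
 */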
void
qla2x00_process_completed_request(struct scsi_qla_host *vha,
				  struct req_que *req, uint32_t index)
{
	srb_t *sp;
	struct qla_hw_data *ha = vha->hw;

	/* Validate handle. */
	if (index >= req->num_outstanding_cmds) {
		ql_log(ql_log_warn, vha, 0x3014,
		    "Invalid SCSI command index (%x).\n", index);

		if (IS_P3P_TYPE(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		return;
	}

	sp = req->outstanding_cmds[index];
	if (sp) {
		/* Free outstanding command slot. */
		req->outstanding_cmds[index] = NULL;

		/* Save ISP completion status */
		sp->done(sp, DID_OK << 16);
	} else {
		ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");

		if (IS_P3P_TYPE(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	}
}

srb_t *
qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
	struct req_que *req, void *iocb)
{
	struct qla_hw_data *ha = vha->hw;
	sts_entry_t *pkt = iocb;
	srb_t *sp = NULL;
	uint16_t index;

	index = LSW(pkt->handle);
	if (index >= req->num_outstanding_cmds) {
		ql_log(ql_log_warn, vha, 0x5031,
		    "Invalid command index (%x) type %8ph.\n",
		    index, iocb);
		if (IS_P3P_TYPE(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		goto done;
	}
	sp = req->outstanding_cmds[index];
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x5032,
		    "Invalid completion handle (%x) -- timed-out.\n", index);
		return sp;
	}
	if (sp->handle != index) {
		ql_log(ql_log_warn, vha, 0x5033,
		    "SRB handle (%x) mismatch %x.\n", sp->handle, index);
		return NULL;
	}

	req->outstanding_cmds[index] = NULL;

done:
	return sp;
}

static void
qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct mbx_entry *mbx)
{
	const char func[] = "MBX-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *lio;
	uint16_t *data;
	uint16_t status;

	sp = qla2x00_get_sp_from_handle(vha, func, req, mbx);
	if (!sp)
		return;

	lio = &sp->u.iocb_cmd;
	type = sp->name;
	fcport = sp->fcport;
	data = lio->u.logio.data;

	data[0] = MBS_COMMAND_ERROR;
	data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
	    QLA_LOGIO_LOGIN_RETRIED : 0;
	if (mbx->entry_status) {
		ql_dbg(ql_dbg_async, vha, 0x5043,
		    "Async-%s error entry - hdl=%x portid=%02x%02x%02x "
		    "entry-status=%x status=%x state-flag=%x "
		    "status-flags=%x.\n", type, sp->handle,
		    fcport->d_id.b.domain, fcport->d_id.b.area,
		    fcport->d_id.b.al_pa, mbx->entry_status,
		    le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags),
		    le16_to_cpu(mbx->status_flags));

		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029,
		    mbx, sizeof(*mbx));

		goto logio_done;
	}

	status = le16_to_cpu(mbx->status);
	if (status == 0x30 && sp->type == SRB_LOGIN_CMD &&
	    le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
		status = 0;
	if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
		ql_dbg(ql_dbg_async, vha, 0x5045,
		    "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n",
		    type, sp->handle, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    le16_to_cpu(mbx->mb1));

		data[0] = MBS_COMMAND_COMPLETE;
		if (sp->type == SRB_LOGIN_CMD) {
			fcport->port_type = FCT_TARGET;
			if (le16_to_cpu(mbx->mb1) & BIT_0)
				fcport->port_type = FCT_INITIATOR;
			else if (le16_to_cpu(mbx->mb1) & BIT_1)
				fcport->flags |= FCF_FCP2_DEVICE;
		}
		goto logio_done;
	}

	data[0] = le16_to_cpu(mbx->mb0);
	switch (data[0]) {
	case MBS_PORT_ID_USED:
		data[1] = le16_to_cpu(mbx->mb1);
		break;
	case MBS_LOOP_ID_USED:
		break;
	default:
		data[0] = MBS_COMMAND_ERROR;
		break;
	}

	ql_log(ql_log_warn, vha, 0x5046,
	    "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x "
	    "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle,
	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
	    status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
	    le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
	    le16_to_cpu(mbx->mb7));

logio_done:
	sp->done(sp, 0);
}

static void
qla24xx_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct mbx_24xx_entry *pkt)
{
	const char func[] = "MBX-IOCB2";
	srb_t *sp;
	struct srb_iocb *si;
	u16 sz, i;
	int res;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	si = &sp->u.iocb_cmd;
	sz = min(ARRAY_SIZE(pkt->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.in_mb));

	for (i = 0; i < sz; i++)
		si->u.mbx.in_mb[i] = le16_to_cpu(pkt->mb[i]);

	res = (si->u.mbx.in_mb[0] & MBS_MASK);

	sp->done(sp, res);
}

static void
qla24xxx_nack_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct nack_to_isp *pkt)
{
	const char func[] = "nack";
	srb_t *sp;
	int res = 0;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	if (pkt->u.isp2x.status != cpu_to_le16(NOTIFY_ACK_SUCCESS))
		res = QLA_FUNCTION_FAILED;

	sp->done(sp, res);
}

static void
qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
    sts_entry_t *pkt, int iocb_type)
{
	const char func[] = "CT_IOCB";
	const char *type;
	srb_t *sp;
	struct bsg_job *bsg_job;
	struct fc_bsg_reply *bsg_reply;
	uint16_t comp_status;
	int res = 0;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	switch (sp->type) {
	case SRB_CT_CMD:
		bsg_job = sp->u.bsg_job;
		bsg_reply = bsg_job->reply;

		type = "ct pass-through";

		comp_status = le16_to_cpu(pkt->comp_status);

		/*
		 * return FC_CTELS_STATUS_OK and leave the decoding of the
		 * ELS/CT fc payload to the caller.
		 */
		bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
		bsg_job->reply_len = sizeof(struct fc_bsg_reply);

		if (comp_status != CS_COMPLETE) {
			if (comp_status == CS_DATA_UNDERRUN) {
				res = DID_OK << 16;
				bsg_reply->reply_payload_rcv_len =
				    le16_to_cpu(pkt->rsp_info_len);

				ql_log(ql_log_warn, vha, 0x5048,
				    "CT pass-through-%s error comp_status=0x%x total_byte=0x%x.\n",
				    type, comp_status,
				    bsg_reply->reply_payload_rcv_len);
			} else {
				ql_log(ql_log_warn, vha, 0x5049,
				    "CT pass-through-%s error comp_status=0x%x.\n",
				    type, comp_status);
				res = DID_ERROR << 16;
				bsg_reply->reply_payload_rcv_len = 0;
			}
			ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
			    pkt, sizeof(*pkt));
		} else {
			res = DID_OK << 16;
			bsg_reply->reply_payload_rcv_len =
			    bsg_job->reply_payload.payload_len;
			bsg_job->reply_len = 0;
		}
		break;
	case SRB_CT_PTHRU_CMD:
		/*
		 * borrowing sts_entry_24xx.comp_status.
		 * same location as ct_entry_24xx.comp_status
		 */
		res = qla2x00_chk_ms_status(vha, (ms_iocb_entry_t *)pkt,
		    (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp,
		    sp->name);
		break;
	}

	sp->done(sp, res);
}

static void
qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct sts_entry_24xx *pkt, int iocb_type)
{
	const char func[] = "ELS_CT_IOCB";
	const char *type;
	srb_t *sp;
	struct bsg_job *bsg_job;
	struct fc_bsg_reply *bsg_reply;
	uint16_t comp_status;
	uint32_t fw_status[3];
	int res;
	struct srb_iocb *els;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	type = NULL;
	switch (sp->type) {
	case SRB_ELS_CMD_RPT:
	case SRB_ELS_CMD_HST:
		type = "els";
		break;
	case SRB_CT_CMD:
		type = "ct pass-through";
		break;
	case SRB_ELS_DCMD:
		type = "Driver ELS logo";
		if (iocb_type != ELS_IOCB_TYPE) {
			ql_dbg(ql_dbg_user, vha, 0x5047,
			    "Completing %s: (%p) type=%d.\n",
			    type, sp, sp->type);
			sp->done(sp, 0);
			return;
		}
		break;
	case SRB_CT_PTHRU_CMD:
		/*
		 * borrowing sts_entry_24xx.comp_status.
		 * same location as ct_entry_24xx.comp_status
		 */
		res = qla2x00_chk_ms_status(sp->vha, (ms_iocb_entry_t *)pkt,
		    (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp,
		    sp->name);
		sp->done(sp, res);
		return;
	default:
		ql_dbg(ql_dbg_user, vha, 0x503e,
		    "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type);
		return;
	}

	comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
	fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_1);
	fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_2);

	if (iocb_type == ELS_IOCB_TYPE) {
		els = &sp->u.iocb_cmd;
		els->u.els_plogi.fw_status[0] = fw_status[0];
		els->u.els_plogi.fw_status[1] = fw_status[1];
		els->u.els_plogi.fw_status[2] = fw_status[2];
		els->u.els_plogi.comp_status = fw_status[0];
		if (comp_status == CS_COMPLETE) {
			res = DID_OK << 16;
		} else {
			if (comp_status == CS_DATA_UNDERRUN) {
				res = DID_OK << 16;
				els->u.els_plogi.len =
				    le16_to_cpu(((struct els_sts_entry_24xx *)
				    pkt)->total_byte_count);
			} else {
				els->u.els_plogi.len = 0;
				res = DID_ERROR << 16;
			}
		}
		ql_dbg(ql_dbg_user, vha, 0x503f,
		    "ELS IOCB Done -%s error hdl=%x comp_status=0x%x error subcode 1=0x%x error subcode 2=0x%x total_byte=0x%x\n",
		    type, sp->handle, comp_status, fw_status[1], fw_status[2],
		    le16_to_cpu(((struct els_sts_entry_24xx *)
		    pkt)->total_byte_count));
		goto els_ct_done;
	}

	/*
	 * return FC_CTELS_STATUS_OK and leave the decoding of the
	 * ELS/CT fc payload to the caller.
	 */
	bsg_job = sp->u.bsg_job;
	bsg_reply = bsg_job->reply;
	bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);

	if (comp_status != CS_COMPLETE) {
		if (comp_status == CS_DATA_UNDERRUN) {
			res = DID_OK << 16;
			bsg_reply->reply_payload_rcv_len =
			    le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count);

			ql_dbg(ql_dbg_user, vha, 0x503f,
			    "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
			    "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
			    type, sp->handle, comp_status, fw_status[1], fw_status[2],
			    le16_to_cpu(((struct els_sts_entry_24xx *)
			    pkt)->total_byte_count));
		} else {
			ql_dbg(ql_dbg_user, vha, 0x5040,
			    "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
			    "error subcode 1=0x%x error subcode 2=0x%x.\n",
			    type, sp->handle, comp_status,
			    le16_to_cpu(((struct els_sts_entry_24xx *)
			    pkt)->error_subcode_1),
			    le16_to_cpu(((struct els_sts_entry_24xx *)
			    pkt)->error_subcode_2));
			res = DID_ERROR << 16;
			bsg_reply->reply_payload_rcv_len = 0;
		}
		memcpy(bsg_job->reply + sizeof(struct fc_bsg_reply),
		    fw_status, sizeof(fw_status));
		ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056,
		    pkt, sizeof(*pkt));
	} else {
		res = DID_OK << 16;
		bsg_reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
		bsg_job->reply_len = 0;
	}
els_ct_done:
	sp->done(sp, res);
}

static void
qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct logio_entry_24xx *logio)
{
	const char func[] = "LOGIO-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *lio;
	uint16_t *data;
	uint32_t iop[2];

	sp = qla2x00_get_sp_from_handle(vha, func, req, logio);
	if (!sp)
		return;

	lio = &sp->u.iocb_cmd;
	type = sp->name;
	fcport = sp->fcport;
	data = lio->u.logio.data;

	data[0] = MBS_COMMAND_ERROR;
	data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
	    QLA_LOGIO_LOGIN_RETRIED : 0;
	if (logio->entry_status) {
		ql_log(ql_log_warn, fcport->vha, 0x5034,
		    "Async-%s error entry - %8phC hdl=%x "
		    "portid=%02x%02x%02x entry-status=%x.\n",
		    type, fcport->port_name, sp->handle, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    logio->entry_status);
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d,
		    logio, sizeof(*logio));

		goto logio_done;
	}

	if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
		ql_dbg(ql_dbg_async, fcport->vha, 0x5036,
		    "Async-%s complete - %8phC hdl=%x portid=%02x%02x%02x "
		    "iop0=%x.\n", type, fcport->port_name, sp->handle,
		    fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    le32_to_cpu(logio->io_parameter[0]));

		vha->hw->exch_starvation = 0;
		data[0] = MBS_COMMAND_COMPLETE;

		if (sp->type == SRB_PRLI_CMD) {
			lio->u.logio.iop[0] =
			    le32_to_cpu(logio->io_parameter[0]);
			lio->u.logio.iop[1] =
			    le32_to_cpu(logio->io_parameter[1]);
			goto logio_done;
		}

		if (sp->type != SRB_LOGIN_CMD)
			goto logio_done;

		iop[0] = le32_to_cpu(logio->io_parameter[0]);
		if (iop[0] & BIT_4) {
			fcport->port_type = FCT_TARGET;
			if (iop[0] & BIT_8)
				fcport->flags |= FCF_FCP2_DEVICE;
		} else if (iop[0] & BIT_5)
			fcport->port_type = FCT_INITIATOR;

		if (iop[0] & BIT_7)
			fcport->flags |= FCF_CONF_COMP_SUPPORTED;

		if (logio->io_parameter[7] || logio->io_parameter[8])
			fcport->supported_classes |= FC_COS_CLASS2;
		if (logio->io_parameter[9] || logio->io_parameter[10])
			fcport->supported_classes |= FC_COS_CLASS3;

		goto logio_done;
	}

	iop[0] = le32_to_cpu(logio->io_parameter[0]);
	iop[1] = le32_to_cpu(logio->io_parameter[1]);
	lio->u.logio.iop[0] = iop[0];
	lio->u.logio.iop[1] = iop[1];
	switch (iop[0]) {
	case LSC_SCODE_PORTID_USED:
		data[0] = MBS_PORT_ID_USED;
		data[1] = LSW(iop[1]);
		break;
	case LSC_SCODE_NPORT_USED:
		data[0] = MBS_LOOP_ID_USED;
		break;
	case LSC_SCODE_CMD_FAILED:
		if (iop[1] == 0x0606) {
			/*
			 * PLOGI/PRLI Completed. We must have Recv PLOGI/PRLI,
			 * Target side acked.
			 */
			data[0] = MBS_COMMAND_COMPLETE;
			goto logio_done;
		}
		data[0] = MBS_COMMAND_ERROR;
		break;
	case LSC_SCODE_NOXCB:
		vha->hw->exch_starvation++;
		if (vha->hw->exch_starvation > 5) {
			ql_log(ql_log_warn, vha, 0xd046,
			    "Exchange starvation. Resetting RISC\n");

			vha->hw->exch_starvation = 0;

			if (IS_P3P_TYPE(vha->hw))
				set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
			else
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
		}
		/* fall through */
	default:
		data[0] = MBS_COMMAND_ERROR;
		break;
	}

	ql_dbg(ql_dbg_async, fcport->vha, 0x5037,
	    "Async-%s failed - %8phC hdl=%x portid=%02x%02x%02x comp=%x "
	    "iop0=%x iop1=%x.\n", type, fcport->port_name,
	    sp->handle, fcport->d_id.b.domain,
	    fcport->d_id.b.area, fcport->d_id.b.al_pa,
	    le16_to_cpu(logio->comp_status),
	    le32_to_cpu(logio->io_parameter[0]),
	    le32_to_cpu(logio->io_parameter[1]));

logio_done:
	sp->done(sp, 0);
}

static void
qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
{
	const char func[] = "TMF-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *iocb;
	struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;

	sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
	if (!sp)
		return;

	iocb = &sp->u.iocb_cmd;
	type = sp->name;
	fcport = sp->fcport;
	iocb->u.tmf.data = QLA_SUCCESS;

	if (sts->entry_status) {
		ql_log(ql_log_warn, fcport->vha, 0x5038,
		    "Async-%s error - hdl=%x entry-status(%x).\n",
		    type, sp->handle, sts->entry_status);
		iocb->u.tmf.data = QLA_FUNCTION_FAILED;
	} else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
		ql_log(ql_log_warn, fcport->vha, 0x5039,
		    "Async-%s error - hdl=%x completion status(%x).\n",
		    type, sp->handle, sts->comp_status);
		iocb->u.tmf.data = QLA_FUNCTION_FAILED;
	} else if ((le16_to_cpu(sts->scsi_status) &
	    SS_RESPONSE_INFO_LEN_VALID)) {
		if (le32_to_cpu(sts->rsp_data_len) < 4) {
			ql_log(ql_log_warn, fcport->vha, 0x503b,
			    "Async-%s error - hdl=%x not enough response(%d).\n",
			    type, sp->handle, sts->rsp_data_len);
		} else if (sts->data[3]) {
			ql_log(ql_log_warn, fcport->vha, 0x503c,
			    "Async-%s error - hdl=%x response(%x).\n",
			    type, sp->handle, sts->data[3]);
			iocb->u.tmf.data = QLA_FUNCTION_FAILED;
		}
	}

	if (iocb->u.tmf.data != QLA_SUCCESS)
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, sp->vha, 0x5055,
		    sts, sizeof(*sts));

	sp->done(sp, 0);
}

static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    void *tsk, srb_t *sp)
{
	fc_port_t *fcport;
	struct srb_iocb *iocb;
	struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
	uint16_t state_flags;
	struct nvmefc_fcp_req *fd;
	uint16_t ret = QLA_SUCCESS;
	uint16_t comp_status = le16_to_cpu(sts->comp_status);

	iocb = &sp->u.iocb_cmd;
	fcport = sp->fcport;
	iocb->u.nvme.comp_status = comp_status;
	state_flags = le16_to_cpu(sts->state_flags);
	fd = iocb->u.nvme.desc;

	if (unlikely(iocb->u.nvme.aen_op))
		atomic_dec(&sp->vha->hw->nvme_active_aen_cnt);

	/*
	 * State flags SF_FCP_RSP_DMA and SF_NVME_ERSP tell where the
	 * response payload lives: if neither is set there is no response
	 * payload; if SF_FCP_RSP_DMA is set the response was DMA'd to the
	 * host buffer; if only SF_NVME_ERSP is set the response data must
	 * be copied from the status IOCB into the response buffer.
	 */
	if (!(state_flags & (SF_FCP_RSP_DMA | SF_NVME_ERSP))) {
		iocb->u.nvme.rsp_pyld_len = 0;
	} else if ((state_flags & SF_FCP_RSP_DMA)) {
		iocb->u.nvme.rsp_pyld_len = le16_to_cpu(sts->nvme_rsp_pyld_len);
	} else if (state_flags & SF_NVME_ERSP) {
		uint32_t *inbuf, *outbuf;
		uint16_t iter;

		inbuf = (uint32_t *)&sts->nvme_ersp_data;
		outbuf = (uint32_t *)fd->rspaddr;
		iocb->u.nvme.rsp_pyld_len = le16_to_cpu(sts->nvme_rsp_pyld_len);
		if (unlikely(iocb->u.nvme.rsp_pyld_len >
		    sizeof(struct nvme_fc_ersp_iu))) {
			if (ql_mask_match(ql_dbg_io)) {
				WARN_ONCE(1, "Unexpected response payload length %u.\n",
				    iocb->u.nvme.rsp_pyld_len);
				ql_log(ql_log_warn, fcport->vha, 0x5100,
				    "Unexpected response payload length %u.\n",
				    iocb->u.nvme.rsp_pyld_len);
			}
			iocb->u.nvme.rsp_pyld_len =
			    sizeof(struct nvme_fc_ersp_iu);
		}
		iter = iocb->u.nvme.rsp_pyld_len >> 2;
		for (; iter; iter--)
			*outbuf++ = swab32(*inbuf++);
	} else {
		ql_log(ql_log_warn, fcport->vha, 0x503a,
		    "NVME-%s error. Unhandled state_flags of %x\n",
		    sp->name, state_flags);
	}

	fd->transferred_length = fd->payload_length -
	    le32_to_cpu(sts->residual_len);

	if (unlikely(comp_status != CS_COMPLETE))
		ql_log(ql_log_warn, fcport->vha, 0x5060,
		    "NVME-%s ERR Handling - hdl=%x status(%x) tr_len:%x resid=%x ox_id=%x\n",
		    sp->name, sp->handle, comp_status,
		    fd->transferred_length, le32_to_cpu(sts->residual_len),
		    sts->ox_id);

	/*
	 * If the transport reported an error, fail the request here;
	 * otherwise let the transport handle the completion.
	 */
	switch (comp_status) {
	case CS_COMPLETE:
		break;

	case CS_RESET:
	case CS_PORT_UNAVAILABLE:
	case CS_PORT_LOGGED_OUT:
		fcport->nvme_flag |= NVME_FLAG_RESETTING;
		/* fall through */
	case CS_ABORTED:
	case CS_PORT_BUSY:
		fd->transferred_length = 0;
		iocb->u.nvme.rsp_pyld_len = 0;
		ret = QLA_ABORTED;
		break;
	case CS_DATA_UNDERRUN:
		break;
	default:
		ret = QLA_FUNCTION_FAILED;
		break;
	}
	sp->done(sp, ret);
}

static void qla_ctrlvp_completed(scsi_qla_host_t *vha, struct req_que *req,
    struct vp_ctrl_entry_24xx *vce)
{
	const char func[] = "CTRLVP-IOCB";
	srb_t *sp;
	int rval = QLA_SUCCESS;

	sp = qla2x00_get_sp_from_handle(vha, func, req, vce);
	if (!sp)
		return;

	if (vce->entry_status != 0) {
		ql_dbg(ql_dbg_vport, vha, 0x10c4,
		    "%s: Failed to complete IOCB -- error status (%x)\n",
		    sp->name, vce->entry_status);
		rval = QLA_FUNCTION_FAILED;
	} else if (vce->comp_status != cpu_to_le16(CS_COMPLETE)) {
		ql_dbg(ql_dbg_vport, vha, 0x10c5,
		    "%s: Failed to complete IOCB -- completion status (%x) vpidx %x\n",
		    sp->name, le16_to_cpu(vce->comp_status),
		    le16_to_cpu(vce->vp_idx_failed));
		rval = QLA_FUNCTION_FAILED;
	} else {
		ql_dbg(ql_dbg_vport, vha, 0x10c6,
		    "Done %s.\n", __func__);
	}

	sp->rc = rval;
	sp->done(sp, rval);
}

static void qla2x00_process_response_entry(struct scsi_qla_host *vha,
					   struct rsp_que *rsp,
					   sts_entry_t *pkt)
{
	sts21_entry_t *sts21_entry;
	sts22_entry_t *sts22_entry;
	uint16_t handle_cnt;
	uint16_t cnt;

	switch (pkt->entry_type) {
	case STATUS_TYPE:
		qla2x00_status_entry(vha, rsp, pkt);
		break;
	case STATUS_TYPE_21:
		sts21_entry = (sts21_entry_t *)pkt;
		handle_cnt = sts21_entry->handle_count;
		for (cnt = 0; cnt < handle_cnt; cnt++)
			qla2x00_process_completed_request(vha, rsp->req,
			    sts21_entry->handle[cnt]);
		break;
	case STATUS_TYPE_22:
		sts22_entry = (sts22_entry_t *)pkt;
		handle_cnt = sts22_entry->handle_count;
		for (cnt = 0; cnt < handle_cnt; cnt++)
			qla2x00_process_completed_request(vha, rsp->req,
			    sts22_entry->handle[cnt]);
		break;
	case STATUS_CONT_TYPE:
		qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
		break;
	case MBX_IOCB_TYPE:
		qla2x00_mbx_iocb_entry(vha, rsp->req, (struct mbx_entry *)pkt);
		break;
	case CT_IOCB_TYPE:
		qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
		break;
	default:
		/* Type Not Supported. */
		ql_log(ql_log_warn, vha, 0x504a,
		    "Received unknown response pkt type %x entry status=%x.\n",
		    pkt->entry_type, pkt->entry_status);
		break;
	}
}

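/**
 * qla2x00_process_response_queue() - Process response queue entries.
 * @rsp: response queue
 */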
void
qla2x00_process_response_queue(struct rsp_que *rsp)
{
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha = rsp->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	sts_entry_t *pkt;

	vha = pci_get_drvdata(ha->pdev);

	if (!vha->flags.online)
		return;

	while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
		pkt = (sts_entry_t *)rsp->ring_ptr;

		rsp->ring_index++;
		if (rsp->ring_index == rsp->length) {
			rsp->ring_index = 0;
			rsp->ring_ptr = rsp->ring;
		} else {
			rsp->ring_ptr++;
		}

		if (pkt->entry_status != 0) {
			qla2x00_error_entry(vha, rsp, pkt);
			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
			wmb();
			continue;
		}

		qla2x00_process_response_entry(vha, rsp, pkt);
		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
		wmb();
	}

	/* Adjust ring index. */
	WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
}

static inline void
qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
		     uint32_t sense_len, struct rsp_que *rsp, int res)
{
	struct scsi_qla_host *vha = sp->vha;
	struct scsi_cmnd *cp = GET_CMD_SP(sp);
	uint32_t track_sense_len;

	if (sense_len >= SCSI_SENSE_BUFFERSIZE)
		sense_len = SCSI_SENSE_BUFFERSIZE;

	SET_CMD_SENSE_LEN(sp, sense_len);
	SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
	track_sense_len = sense_len;

	if (sense_len > par_sense_len)
		sense_len = par_sense_len;

	memcpy(cp->sense_buffer, sense_data, sense_len);

	SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
	track_sense_len -= sense_len;
	SET_CMD_SENSE_LEN(sp, track_sense_len);

	if (track_sense_len != 0) {
		rsp->status_srb = sp;
		cp->result = res;
	}

	if (sense_len) {
		ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c,
		    "Check condition Sense data, nexus%ld:%d:%llu cmd=%p.\n",
		    sp->vha->host_no, cp->device->id, cp->device->lun,
		    cp);
		ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
		    cp->sense_buffer, sense_len);
	}
}

2119 struct scsi_dif_tuple {
2120 __be16 guard;
2121 __be16 app_tag;
2122 __be32 ref_tag;
2123 };
2124
2125
2126
2127
2128
2129
2130
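/*
 * Checks the guard or meta-data for the type of error
 * detected by the HBA. In case of errors, set the ASC/ASCQ
 * fields in the sense buffer with ILLEGAL_REQUEST to indicate
 * to the kernel that the HBA detected the error.
 */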
2131 static inline int
2132 qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
2133 {
2134 struct scsi_qla_host *vha = sp->vha;
2135 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
2136 uint8_t *ap = &sts24->data[12];
2137 uint8_t *ep = &sts24->data[20];
2138 uint32_t e_ref_tag, a_ref_tag;
2139 uint16_t e_app_tag, a_app_tag;
2140 uint16_t e_guard, a_guard;
2141
2142
2143
2144
2145
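/*
 * swab32 of the "data" field in the beginning of qla2x00_status_entry()
 * would make the guard field appear at offset 2.
 */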
2146 a_guard = le16_to_cpu(*(uint16_t *)(ap + 2));
2147 a_app_tag = le16_to_cpu(*(uint16_t *)(ap + 0));
2148 a_ref_tag = le32_to_cpu(*(uint32_t *)(ap + 4));
2149 e_guard = le16_to_cpu(*(uint16_t *)(ep + 2));
2150 e_app_tag = le16_to_cpu(*(uint16_t *)(ep + 0));
2151 e_ref_tag = le32_to_cpu(*(uint32_t *)(ep + 4));
2152
2153 ql_dbg(ql_dbg_io, vha, 0x3023,
2154 "iocb(s) %p Returned STATUS.\n", sts24);
2155
2156 ql_dbg(ql_dbg_io, vha, 0x3024,
2157 "DIF ERROR in cmd 0x%x lba 0x%llx act ref"
2158 " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app"
2159 " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n",
2160 cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
2161 a_app_tag, e_app_tag, a_guard, e_guard);
2162
2163
2164
2165
2166
2167
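/*
 * Ignore sector if:
 * For type     3: ref & app tag is all 'f's
 * For type 0,1,2: app tag is all 'f's
 */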
2168 if ((a_app_tag == T10_PI_APP_ESCAPE) &&
2169 ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) ||
2170 (a_ref_tag == T10_PI_REF_ESCAPE))) {
2171 uint32_t blocks_done, resid;
2172 sector_t lba_s = scsi_get_lba(cmd);
2173
2174
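/* 2TB boundary case covered automatically with this */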
2175 blocks_done = e_ref_tag - (uint32_t)lba_s + 1;
2176
2177 resid = scsi_bufflen(cmd) - (blocks_done *
2178 cmd->device->sector_size);
2179
2180 scsi_set_resid(cmd, resid);
2181 cmd->result = DID_OK << 16;
2182
2183
2184 if (scsi_prot_sg_count(cmd)) {
2185 uint32_t i, j = 0, k = 0, num_ent;
2186 struct scatterlist *sg;
2187 struct t10_pi_tuple *spt;
2188
2189
2190 scsi_for_each_prot_sg(cmd, sg,
2191 scsi_prot_sg_count(cmd), i) {
2192 num_ent = sg_dma_len(sg) / 8;
2193 if (k + num_ent < blocks_done) {
2194 k += num_ent;
2195 continue;
2196 }
2197 j = blocks_done - k - 1;
2198 k = blocks_done;
2199 break;
2200 }
2201
2202 if (k != blocks_done) {
2203 ql_log(ql_log_warn, vha, 0x302f,
2204 "unexpected tag values tag:lba=%x:%llx)\n",
2205 e_ref_tag, (unsigned long long)lba_s);
2206 return 1;
2207 }
2208
2209 spt = page_address(sg_page(sg)) + sg->offset;
2210 spt += j;
2211
2212 spt->app_tag = T10_PI_APP_ESCAPE;
2213 if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3)
2214 spt->ref_tag = T10_PI_REF_ESCAPE;
2215 }
2216
2217 return 0;
2218 }
2219
2220
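/* check guard */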
2221 if (e_guard != a_guard) {
2222 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2223 0x10, 0x1);
2224 set_driver_byte(cmd, DRIVER_SENSE);
2225 set_host_byte(cmd, DID_ABORT);
2226 cmd->result |= SAM_STAT_CHECK_CONDITION;
2227 return 1;
2228 }
2229
2230
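/* check ref tag */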
2231 if (e_ref_tag != a_ref_tag) {
2232 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2233 0x10, 0x3);
2234 set_driver_byte(cmd, DRIVER_SENSE);
2235 set_host_byte(cmd, DID_ABORT);
2236 cmd->result |= SAM_STAT_CHECK_CONDITION;
2237 return 1;
2238 }
2239
2240
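/* check appl tag */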
2241 if (e_app_tag != a_app_tag) {
2242 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2243 0x10, 0x2);
2244 set_driver_byte(cmd, DRIVER_SENSE);
2245 set_host_byte(cmd, DID_ABORT);
2246 cmd->result |= SAM_STAT_CHECK_CONDITION;
2247 return 1;
2248 }
2249
2250 return 1;
2251 }
2252
2253 static void
2254 qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
2255 struct req_que *req, uint32_t index)
2256 {
2257 struct qla_hw_data *ha = vha->hw;
2258 srb_t *sp;
2259 uint16_t comp_status;
2260 uint16_t scsi_status;
2261 uint16_t thread_id;
2262 uint32_t rval = EXT_STATUS_OK;
2263 struct bsg_job *bsg_job = NULL;
2264 struct fc_bsg_request *bsg_request;
2265 struct fc_bsg_reply *bsg_reply;
2266 sts_entry_t *sts = pkt;
2267 struct sts_entry_24xx *sts24 = pkt;
2268
2269
2270 if (index >= req->num_outstanding_cmds) {
2271 ql_log(ql_log_warn, vha, 0x70af,
2272 "Invalid SCSI completion handle 0x%x.\n", index);
2273 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2274 return;
2275 }
2276
2277 sp = req->outstanding_cmds[index];
2278 if (!sp) {
2279 ql_log(ql_log_warn, vha, 0x70b0,
2280 "Req:%d: Invalid ISP SCSI completion handle(0x%x)\n",
2281 req->id, index);
2282
2283 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2284 return;
2285 }
2286
2287
2288 req->outstanding_cmds[index] = NULL;
2289 bsg_job = sp->u.bsg_job;
2290 bsg_request = bsg_job->request;
2291 bsg_reply = bsg_job->reply;
2292
2293 if (IS_FWI2_CAPABLE(ha)) {
2294 comp_status = le16_to_cpu(sts24->comp_status);
2295 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
2296 } else {
2297 comp_status = le16_to_cpu(sts->comp_status);
2298 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
2299 }
2300
2301 thread_id = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
2302 switch (comp_status) {
2303 case CS_COMPLETE:
2304 if (scsi_status == 0) {
2305 bsg_reply->reply_payload_rcv_len =
2306 bsg_job->reply_payload.payload_len;
2307 vha->qla_stats.input_bytes +=
2308 bsg_reply->reply_payload_rcv_len;
2309 vha->qla_stats.input_requests++;
2310 rval = EXT_STATUS_OK;
2311 }
2312 goto done;
2313
2314 case CS_DATA_OVERRUN:
2315 ql_dbg(ql_dbg_user, vha, 0x70b1,
2316 "Command completed with data overrun thread_id=%d\n",
2317 thread_id);
2318 rval = EXT_STATUS_DATA_OVERRUN;
2319 break;
2320
2321 case CS_DATA_UNDERRUN:
2322 ql_dbg(ql_dbg_user, vha, 0x70b2,
2323 "Command completed with data underrun thread_id=%d\n",
2324 thread_id);
2325 rval = EXT_STATUS_DATA_UNDERRUN;
2326 break;
2327 case CS_BIDIR_RD_OVERRUN:
2328 ql_dbg(ql_dbg_user, vha, 0x70b3,
2329 "Command completed with read data overrun thread_id=%d\n",
2330 thread_id);
2331 rval = EXT_STATUS_DATA_OVERRUN;
2332 break;
2333
2334 case CS_BIDIR_RD_WR_OVERRUN:
2335 ql_dbg(ql_dbg_user, vha, 0x70b4,
2336 "Command completed with read and write data overrun "
2337 "thread_id=%d\n", thread_id);
2338 rval = EXT_STATUS_DATA_OVERRUN;
2339 break;
2340
2341 case CS_BIDIR_RD_OVERRUN_WR_UNDERRUN:
2342 ql_dbg(ql_dbg_user, vha, 0x70b5,
2343 "Command completed with read data over and write data "
2344 "underrun thread_id=%d\n", thread_id);
2345 rval = EXT_STATUS_DATA_OVERRUN;
2346 break;
2347
2348 case CS_BIDIR_RD_UNDERRUN:
2349 ql_dbg(ql_dbg_user, vha, 0x70b6,
2350 "Command completed with read data underrun "
2351 "thread_id=%d\n", thread_id);
2352 rval = EXT_STATUS_DATA_UNDERRUN;
2353 break;
2354
2355 case CS_BIDIR_RD_UNDERRUN_WR_OVERRUN:
2356 ql_dbg(ql_dbg_user, vha, 0x70b7,
2357 "Command completed with read data under and write data "
2358 "overrun thread_id=%d\n", thread_id);
2359 rval = EXT_STATUS_DATA_UNDERRUN;
2360 break;
2361
2362 case CS_BIDIR_RD_WR_UNDERRUN:
2363 ql_dbg(ql_dbg_user, vha, 0x70b8,
2364 "Command completed with read and write data underrun "
2365 "thread_id=%d\n", thread_id);
2366 rval = EXT_STATUS_DATA_UNDERRUN;
2367 break;
2368
2369 case CS_BIDIR_DMA:
2370 ql_dbg(ql_dbg_user, vha, 0x70b9,
2371 "Command completed with data DMA error thread_id=%d\n",
2372 thread_id);
2373 rval = EXT_STATUS_DMA_ERR;
2374 break;
2375
2376 case CS_TIMEOUT:
2377 ql_dbg(ql_dbg_user, vha, 0x70ba,
2378 "Command completed with timeout thread_id=%d\n",
2379 thread_id);
2380 rval = EXT_STATUS_TIMEOUT;
2381 break;
2382 default:
2383 ql_dbg(ql_dbg_user, vha, 0x70bb,
2384 "Command completed with completion status=0x%x "
2385 "thread_id=%d\n", comp_status, thread_id);
2386 rval = EXT_STATUS_ERR;
2387 break;
2388 }
2389 bsg_reply->reply_payload_rcv_len = 0;
2390
2391 done:
2392
2393 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
2394 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2395
2396
2397 sp->done(sp, DID_OK << 16);
2398
2399 }
2400
2401
2402
2403
2404
2405
2406
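/**
 * qla2x00_status_entry() - Process a Status IOCB entry.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 * @pkt: Entry pointer
 */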
2407 static void
2408 qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
2409 {
2410 srb_t *sp;
2411 fc_port_t *fcport;
2412 struct scsi_cmnd *cp;
2413 sts_entry_t *sts = pkt;
2414 struct sts_entry_24xx *sts24 = pkt;
2415 uint16_t comp_status;
2416 uint16_t scsi_status;
2417 uint16_t ox_id;
2418 uint8_t lscsi_status;
2419 int32_t resid;
2420 uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
2421 fw_resid_len;
2422 uint8_t *rsp_info, *sense_data;
2423 struct qla_hw_data *ha = vha->hw;
2424 uint32_t handle;
2425 uint16_t que;
2426 struct req_que *req;
2427 int logit = 1;
2428 int res = 0;
2429 uint16_t state_flags = 0;
2430 uint16_t retry_delay = 0;
2431
2432 if (IS_FWI2_CAPABLE(ha)) {
2433 comp_status = le16_to_cpu(sts24->comp_status);
2434 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
2435 state_flags = le16_to_cpu(sts24->state_flags);
2436 } else {
2437 comp_status = le16_to_cpu(sts->comp_status);
2438 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
2439 }
2440 handle = (uint32_t) LSW(sts->handle);
2441 que = MSW(sts->handle);
2442 req = ha->req_q_map[que];
2443
2444
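/* Validate handle. */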
2445 if (req == NULL ||
2446 que >= find_first_zero_bit(ha->req_qid_map, ha->max_req_queues)) {
2447 ql_dbg(ql_dbg_io, vha, 0x3059,
2448 "Invalid status handle (0x%x): Bad req pointer. req=%p, "
2449 "que=%u.\n", sts->handle, req, que);
2450 return;
2451 }
2452
2453
2454 if (handle < req->num_outstanding_cmds) {
2455 sp = req->outstanding_cmds[handle];
2456 if (!sp) {
2457 ql_dbg(ql_dbg_io, vha, 0x3075,
2458 "%s(%ld): Already returned command for status handle (0x%x).\n",
2459 __func__, vha->host_no, sts->handle);
2460 return;
2461 }
2462 } else {
2463 ql_dbg(ql_dbg_io, vha, 0x3017,
2464 "Invalid status handle, out of range (0x%x).\n",
2465 sts->handle);
2466
2467 if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
2468 if (IS_P3P_TYPE(ha))
2469 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
2470 else
2471 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2472 qla2xxx_wake_dpc(vha);
2473 }
2474 return;
2475 }
2476
2477 if (sp->abort)
2478 sp->aborted = 1;
2479 else
2480 sp->completed = 1;
2481
2482 if (sp->cmd_type != TYPE_SRB) {
2483 req->outstanding_cmds[handle] = NULL;
2484 ql_dbg(ql_dbg_io, vha, 0x3015,
2485 "Unknown sp->cmd_type %x %p).\n",
2486 sp->cmd_type, sp);
2487 return;
2488 }
2489
2490
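/* NVME completion. */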
2491 if (sp->type == SRB_NVME_CMD) {
2492 req->outstanding_cmds[handle] = NULL;
2493 qla24xx_nvme_iocb_entry(vha, req, pkt, sp);
2494 return;
2495 }
2496
2497 if (unlikely((state_flags & BIT_1) && (sp->type == SRB_BIDI_CMD))) {
2498 qla25xx_process_bidir_status_iocb(vha, pkt, req, handle);
2499 return;
2500 }
2501
2502
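/* Task Management completion. */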
2503 if (sp->type == SRB_TM_CMD) {
2504 qla24xx_tm_iocb_entry(vha, req, pkt);
2505 return;
2506 }
2507
2508
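/* Fast path completion. */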
2509 if (comp_status == CS_COMPLETE && scsi_status == 0) {
2510 qla2x00_process_completed_request(vha, req, handle);
2511
2512 return;
2513 }
2514
2515 req->outstanding_cmds[handle] = NULL;
2516 cp = GET_CMD_SP(sp);
2517 if (cp == NULL) {
2518 ql_dbg(ql_dbg_io, vha, 0x3018,
2519 "Command already returned (0x%x/%p).\n",
2520 sts->handle, sp);
2521
2522 return;
2523 }
2524
2525 lscsi_status = scsi_status & STATUS_MASK;
2526
2527 fcport = sp->fcport;
2528
2529 ox_id = 0;
2530 sense_len = par_sense_len = rsp_info_len = resid_len =
2531 fw_resid_len = 0;
2532 if (IS_FWI2_CAPABLE(ha)) {
2533 if (scsi_status & SS_SENSE_LEN_VALID)
2534 sense_len = le32_to_cpu(sts24->sense_len);
2535 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
2536 rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
2537 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER))
2538 resid_len = le32_to_cpu(sts24->rsp_residual_count);
2539 if (comp_status == CS_DATA_UNDERRUN)
2540 fw_resid_len = le32_to_cpu(sts24->residual_len);
2541 rsp_info = sts24->data;
2542 sense_data = sts24->data;
2543 host_to_fcp_swap(sts24->data, sizeof(sts24->data));
2544 ox_id = le16_to_cpu(sts24->ox_id);
2545 par_sense_len = sizeof(sts24->data);
2546
2547 if (sts24->retry_delay > 0 && sts24->retry_delay < 0xfff1) {
2548 retry_delay = sts24->retry_delay & 0x3fff;
2549 ql_dbg(ql_dbg_io, sp->vha, 0x3033,
2550 "%s: scope=%#x retry_delay=%#x\n", __func__,
2551 sts24->retry_delay >> 14, retry_delay);
2552 }
2553 } else {
2554 if (scsi_status & SS_SENSE_LEN_VALID)
2555 sense_len = le16_to_cpu(sts->req_sense_length);
2556 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
2557 rsp_info_len = le16_to_cpu(sts->rsp_info_len);
2558 resid_len = le32_to_cpu(sts->residual_length);
2559 rsp_info = sts->rsp_info;
2560 sense_data = sts->req_sense_data;
2561 par_sense_len = sizeof(sts->req_sense_data);
2562 }
2563
2564
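/* Check for any FCP transport errors. */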
2565 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
2566
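/* Sense data lies beyond any FCP RESPONSE data. */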
2567 if (IS_FWI2_CAPABLE(ha)) {
2568 sense_data += rsp_info_len;
2569 par_sense_len -= rsp_info_len;
2570 }
2571 if (rsp_info_len > 3 && rsp_info[3]) {
2572 ql_dbg(ql_dbg_io, fcport->vha, 0x3019,
2573 "FCP I/O protocol failure (0x%x/0x%x).\n",
2574 rsp_info_len, rsp_info[3]);
2575
2576 res = DID_BUS_BUSY << 16;
2577 goto out;
2578 }
2579 }
2580
2581
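/* Check for overrun. */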
2582 if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
2583 scsi_status & SS_RESIDUAL_OVER)
2584 comp_status = CS_DATA_OVERRUN;
2585
2586
2587
2588
2589
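/*
 * Check retry_delay_timer value if we receive a busy or
 * queue full.
 */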
2590 if (lscsi_status == SAM_STAT_TASK_SET_FULL ||
2591 lscsi_status == SAM_STAT_BUSY)
2592 qla2x00_set_retry_delay_timestamp(fcport, retry_delay);
2593
2594
2595
2596
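/*
 * Based on Host and scsi status generate status code for Linux
 */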
2597 switch (comp_status) {
2598 case CS_COMPLETE:
2599 case CS_QUEUE_FULL:
2600 if (scsi_status == 0) {
2601 res = DID_OK << 16;
2602 break;
2603 }
2604 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
2605 resid = resid_len;
2606 scsi_set_resid(cp, resid);
2607
2608 if (!lscsi_status &&
2609 ((unsigned)(scsi_bufflen(cp) - resid) <
2610 cp->underflow)) {
2611 ql_dbg(ql_dbg_io, fcport->vha, 0x301a,
2612 "Mid-layer underflow detected (0x%x of 0x%x bytes).\n",
2613 resid, scsi_bufflen(cp));
2614
2615 res = DID_ERROR << 16;
2616 break;
2617 }
2618 }
2619 res = DID_OK << 16 | lscsi_status;
2620
2621 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
2622 ql_dbg(ql_dbg_io, fcport->vha, 0x301b,
2623 "QUEUE FULL detected.\n");
2624 break;
2625 }
2626 logit = 0;
2627 if (lscsi_status != SS_CHECK_CONDITION)
2628 break;
2629
2630 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
2631 if (!(scsi_status & SS_SENSE_LEN_VALID))
2632 break;
2633
2634 qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len,
2635 rsp, res);
2636 break;
2637
2638 case CS_DATA_UNDERRUN:
2639
2640 resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len;
2641 scsi_set_resid(cp, resid);
2642 if (scsi_status & SS_RESIDUAL_UNDER) {
2643 if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
2644 ql_dbg(ql_dbg_io, fcport->vha, 0x301d,
2645 "Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
2646 resid, scsi_bufflen(cp));
2647
2648 res = DID_ERROR << 16 | lscsi_status;
2649 goto check_scsi_status;
2650 }
2651
2652 if (!lscsi_status &&
2653 ((unsigned)(scsi_bufflen(cp) - resid) <
2654 cp->underflow)) {
2655 ql_dbg(ql_dbg_io, fcport->vha, 0x301e,
2656 "Mid-layer underflow detected (0x%x of 0x%x bytes).\n",
2657 resid, scsi_bufflen(cp));
2658
2659 res = DID_ERROR << 16;
2660 break;
2661 }
2662 } else if (lscsi_status != SAM_STAT_TASK_SET_FULL &&
2663 lscsi_status != SAM_STAT_BUSY) {
2664
2665
2666
2667
2668
2669 ql_dbg(ql_dbg_io, fcport->vha, 0x301f,
2670 "Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
2671 resid, scsi_bufflen(cp));
2672
2673 res = DID_ERROR << 16 | lscsi_status;
2674 goto check_scsi_status;
2675 } else {
2676 ql_dbg(ql_dbg_io, fcport->vha, 0x3030,
2677 "scsi_status: 0x%x, lscsi_status: 0x%x\n",
2678 scsi_status, lscsi_status);
2679 }
2680
2681 res = DID_OK << 16 | lscsi_status;
2682 logit = 0;
2683
2684 check_scsi_status:
2685
2686
2687
2688
2689 if (lscsi_status != 0) {
2690 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
2691 ql_dbg(ql_dbg_io, fcport->vha, 0x3020,
2692 "QUEUE FULL detected.\n");
2693 logit = 1;
2694 break;
2695 }
2696 if (lscsi_status != SS_CHECK_CONDITION)
2697 break;
2698
2699 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
2700 if (!(scsi_status & SS_SENSE_LEN_VALID))
2701 break;
2702
2703 qla2x00_handle_sense(sp, sense_data, par_sense_len,
2704 sense_len, rsp, res);
2705 }
2706 break;
2707
2708 case CS_PORT_LOGGED_OUT:
2709 case CS_PORT_CONFIG_CHG:
2710 case CS_PORT_BUSY:
2711 case CS_INCOMPLETE:
2712 case CS_PORT_UNAVAILABLE:
2713 case CS_TIMEOUT:
2714 case CS_RESET:
2715
2716
2717
2718
2719
2720
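/*
 * We are going to have the fc class block the rport
 * while we try to recover so instruct the mid layer
 * to requeue until the class decides how to handle this.
 */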
2721 res = DID_TRANSPORT_DISRUPTED << 16;
2722
2723 if (comp_status == CS_TIMEOUT) {
2724 if (IS_FWI2_CAPABLE(ha))
2725 break;
2726 else if ((le16_to_cpu(sts->status_flags) &
2727 SF_LOGOUT_SENT) == 0)
2728 break;
2729 }
2730
2731 if (atomic_read(&fcport->state) == FCS_ONLINE) {
2732 ql_dbg(ql_dbg_disc, fcport->vha, 0x3021,
2733 "Port to be marked lost on fcport=%02x%02x%02x, current "
2734 "port state= %s comp_status %x.\n", fcport->d_id.b.domain,
2735 fcport->d_id.b.area, fcport->d_id.b.al_pa,
2736 port_state_str[FCS_ONLINE],
2737 comp_status);
2738
2739 qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
2740 qlt_schedule_sess_for_deletion(fcport);
2741 }
2742
2743 break;
2744
2745 case CS_ABORTED:
2746 res = DID_RESET << 16;
2747 break;
2748
2749 case CS_DIF_ERROR:
2750 logit = qla2x00_handle_dif_error(sp, sts24);
2751 res = cp->result;
2752 break;
2753
2754 case CS_TRANSPORT:
2755 res = DID_ERROR << 16;
2756
2757 if (!IS_PI_SPLIT_DET_CAPABLE(ha))
2758 break;
2759
2760 if (state_flags & BIT_4)
2761 scmd_printk(KERN_WARNING, cp,
2762 "Unsupported device '%s' found.\n",
2763 cp->device->vendor);
2764 break;
2765
2766 case CS_DMA:
2767 ql_log(ql_log_info, fcport->vha, 0x3022,
2768 "CS_DMA error: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu portid=%06x oxid=0x%x cdb=%10phN len=0x%x rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n",
2769 comp_status, scsi_status, res, vha->host_no,
2770 cp->device->id, cp->device->lun, fcport->d_id.b24,
2771 ox_id, cp->cmnd, scsi_bufflen(cp), rsp_info_len,
2772 resid_len, fw_resid_len, sp, cp);
2773 ql_dump_buffer(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe0ee,
2774 pkt, sizeof(*sts24));
2775 res = DID_ERROR << 16;
2776 break;
2777 default:
2778 res = DID_ERROR << 16;
2779 break;
2780 }
2781
2782 out:
2783 if (logit)
2784 ql_dbg(ql_dbg_io, fcport->vha, 0x3022,
2785 "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu "
2786 "portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x "
2787 "rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n",
2788 comp_status, scsi_status, res, vha->host_no,
2789 cp->device->id, cp->device->lun, fcport->d_id.b.domain,
2790 fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
2791 cp->cmnd, scsi_bufflen(cp), rsp_info_len,
2792 resid_len, fw_resid_len, sp, cp);
2793
2794 if (rsp->status_srb == NULL)
2795 sp->done(sp, res);
2796 }
2797
2798
2799
2800
2801
2802
2803
2804
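/**
 * qla2x00_status_cont_entry() - Process a Status Continuations entry.
 * @rsp: response queue
 * @pkt: Entry pointer
 *
 * Extended sense data.
 */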
2805 static void
2806 qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
2807 {
2808 uint8_t sense_sz = 0;
2809 struct qla_hw_data *ha = rsp->hw;
2810 struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
2811 srb_t *sp = rsp->status_srb;
2812 struct scsi_cmnd *cp;
2813 uint32_t sense_len;
2814 uint8_t *sense_ptr;
2815
2816 if (!sp || !GET_CMD_SENSE_LEN(sp))
2817 return;
2818
2819 sense_len = GET_CMD_SENSE_LEN(sp);
2820 sense_ptr = GET_CMD_SENSE_PTR(sp);
2821
2822 cp = GET_CMD_SP(sp);
2823 if (cp == NULL) {
2824 ql_log(ql_log_warn, vha, 0x3025,
2825 "cmd is NULL: already returned to OS (sp=%p).\n", sp);
2826
2827 rsp->status_srb = NULL;
2828 return;
2829 }
2830
2831 if (sense_len > sizeof(pkt->data))
2832 sense_sz = sizeof(pkt->data);
2833 else
2834 sense_sz = sense_len;
2835
2836
2837 if (IS_FWI2_CAPABLE(ha))
2838 host_to_fcp_swap(pkt->data, sizeof(pkt->data));
2839 memcpy(sense_ptr, pkt->data, sense_sz);
2840 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c,
2841 sense_ptr, sense_sz);
2842
2843 sense_len -= sense_sz;
2844 sense_ptr += sense_sz;
2845
2846 SET_CMD_SENSE_PTR(sp, sense_ptr);
2847 SET_CMD_SENSE_LEN(sp, sense_len);
2848
2849
2850 if (sense_len == 0) {
2851 rsp->status_srb = NULL;
2852 sp->done(sp, cp->result);
2853 }
2854 }
2855
2856
2857
2858
2859
2860
2861
2862
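/**
 * qla2x00_error_entry() - Process an error entry.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 * @pkt: Entry pointer
 * return : 1=allow further error analysis. 0=no additional error analysis.
 */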
2863 static int
2864 qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
2865 {
2866 srb_t *sp;
2867 struct qla_hw_data *ha = vha->hw;
2868 const char func[] = "ERROR-IOCB";
2869 uint16_t que = MSW(pkt->handle);
2870 struct req_que *req = NULL;
2871 int res = DID_ERROR << 16;
2872
2873 ql_dbg(ql_dbg_async, vha, 0x502a,
2874 "iocb type %xh with error status %xh, handle %xh, rspq id %d\n",
2875 pkt->entry_type, pkt->entry_status, pkt->handle, rsp->id);
2876
2877 if (que >= ha->max_req_queues || !ha->req_q_map[que])
2878 goto fatal;
2879
2880 req = ha->req_q_map[que];
2881
2882 if (pkt->entry_status & RF_BUSY)
2883 res = DID_BUS_BUSY << 16;
2884
2885 if ((pkt->handle & ~QLA_TGT_HANDLE_MASK) == QLA_TGT_SKIP_HANDLE)
2886 return 0;
2887
2888 switch (pkt->entry_type) {
2889 case NOTIFY_ACK_TYPE:
2890 case STATUS_TYPE:
2891 case STATUS_CONT_TYPE:
2892 case LOGINOUT_PORT_IOCB_TYPE:
2893 case CT_IOCB_TYPE:
2894 case ELS_IOCB_TYPE:
2895 case ABORT_IOCB_TYPE:
2896 case MBX_IOCB_TYPE:
2897 default:
2898 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
2899 if (sp) {
2900 sp->done(sp, res);
2901 return 0;
2902 }
2903 break;
2904
2905 case ABTS_RESP_24XX:
2906 case CTIO_TYPE7:
2907 case CTIO_CRC2:
2908 return 1;
2909 }
2910 fatal:
2911 ql_log(ql_log_warn, vha, 0x5030,
2912 "Error entry - invalid handle/queue (%04x).\n", que);
2913 return 0;
2914 }
2915
2916
2917
2918
2919
2920
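/**
 * qla24xx_mbx_completion() - Process mailbox command completions.
 * @vha: SCSI driver HA context
 * @mb0: Mailbox0 register
 */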
2921 static void
2922 qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
2923 {
2924 uint16_t cnt;
2925 uint32_t mboxes;
2926 uint16_t __iomem *wptr;
2927 struct qla_hw_data *ha = vha->hw;
2928 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
2929
2930
2931 WARN_ON_ONCE(ha->mbx_count > 32);
2932 mboxes = (1ULL << ha->mbx_count) - 1;
2933 if (!ha->mcp)
2934 ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n");
2935 else
2936 mboxes = ha->mcp->in_mb;
2937
2938
2939 ha->flags.mbox_int = 1;
2940 ha->mailbox_out[0] = mb0;
2941 mboxes >>= 1;
2942 wptr = (uint16_t __iomem *)&reg->mailbox1;
2943
2944 for (cnt = 1; cnt < ha->mbx_count; cnt++) {
2945 if (mboxes & BIT_0)
2946 ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
2947
2948 mboxes >>= 1;
2949 wptr++;
2950 }
2951 }
2952
2953 static void
2954 qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
2955 struct abort_entry_24xx *pkt)
2956 {
2957 const char func[] = "ABT_IOCB";
2958 srb_t *sp;
2959 struct srb_iocb *abt;
2960
2961 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
2962 if (!sp)
2963 return;
2964
2965 abt = &sp->u.iocb_cmd;
2966 abt->u.abt.comp_status = le16_to_cpu(pkt->nport_handle);
2967 sp->done(sp, 0);
2968 }
2969
2970 void qla24xx_nvme_ls4_iocb(struct scsi_qla_host *vha,
2971 struct pt_ls4_request *pkt, struct req_que *req)
2972 {
2973 srb_t *sp;
2974 const char func[] = "LS4_IOCB";
2975 uint16_t comp_status;
2976
2977 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
2978 if (!sp)
2979 return;
2980
2981 comp_status = le16_to_cpu(pkt->status);
2982 sp->done(sp, comp_status);
2983 }
2984
2985
2986
2987
2988
2989
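/**
 * qla24xx_process_response_queue() - Process response queue entries.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 */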
2990 void qla24xx_process_response_queue(struct scsi_qla_host *vha,
2991 struct rsp_que *rsp)
2992 {
2993 struct sts_entry_24xx *pkt;
2994 struct qla_hw_data *ha = vha->hw;
2995
2996 if (!ha->flags.fw_started)
2997 return;
2998
2999 if (rsp->qpair->cpuid != smp_processor_id())
3000 qla_cpu_update(rsp->qpair, smp_processor_id());
3001
3002 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
3003 pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
3004
3005 rsp->ring_index++;
3006 if (rsp->ring_index == rsp->length) {
3007 rsp->ring_index = 0;
3008 rsp->ring_ptr = rsp->ring;
3009 } else {
3010 rsp->ring_ptr++;
3011 }
3012
3013 if (pkt->entry_status != 0) {
3014 if (qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt))
3015 goto process_err;
3016
3017 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
3018 wmb();
3019 continue;
3020 }
3021 process_err:
3022
3023 switch (pkt->entry_type) {
3024 case STATUS_TYPE:
3025 qla2x00_status_entry(vha, rsp, pkt);
3026 break;
3027 case STATUS_CONT_TYPE:
3028 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
3029 break;
3030 case VP_RPT_ID_IOCB_TYPE:
3031 qla24xx_report_id_acquisition(vha,
3032 (struct vp_rpt_id_entry_24xx *)pkt);
3033 break;
3034 case LOGINOUT_PORT_IOCB_TYPE:
3035 qla24xx_logio_entry(vha, rsp->req,
3036 (struct logio_entry_24xx *)pkt);
3037 break;
3038 case CT_IOCB_TYPE:
3039 qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
3040 break;
3041 case ELS_IOCB_TYPE:
3042 qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
3043 break;
3044 case ABTS_RECV_24XX:
3045 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
3046 IS_QLA28XX(ha)) {
3047
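/* ensure that the ATIO queue is empty */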
3048 qlt_handle_abts_recv(vha, rsp,
3049 (response_t *)pkt);
3050 break;
3051 } else {
3052 qlt_24xx_process_atio_queue(vha, 1);
3053 }
3054
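/* fall through */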
3055 case ABTS_RESP_24XX:
3056 case CTIO_TYPE7:
3057 case CTIO_CRC2:
3058 qlt_response_pkt_all_vps(vha, rsp, (response_t *)pkt);
3059 break;
3060 case PT_LS4_REQUEST:
3061 qla24xx_nvme_ls4_iocb(vha, (struct pt_ls4_request *)pkt,
3062 rsp->req);
3063 break;
3064 case NOTIFY_ACK_TYPE:
3065 if (pkt->handle == QLA_TGT_SKIP_HANDLE)
3066 qlt_response_pkt_all_vps(vha, rsp,
3067 (response_t *)pkt);
3068 else
3069 qla24xxx_nack_iocb_entry(vha, rsp->req,
3070 (struct nack_to_isp *)pkt);
3071 break;
3072 case MARKER_TYPE:
3073
3074
3075
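/* Do nothing in this case, this check is to prevent it
 * from falling into default case
 */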
3076 break;
3077 case ABORT_IOCB_TYPE:
3078 qla24xx_abort_iocb_entry(vha, rsp->req,
3079 (struct abort_entry_24xx *)pkt);
3080 break;
3081 case MBX_IOCB_TYPE:
3082 qla24xx_mbx_iocb_entry(vha, rsp->req,
3083 (struct mbx_24xx_entry *)pkt);
3084 break;
3085 case VP_CTRL_IOCB_TYPE:
3086 qla_ctrlvp_completed(vha, rsp->req,
3087 (struct vp_ctrl_entry_24xx *)pkt);
3088 break;
3089 default:
3090
3091 ql_dbg(ql_dbg_async, vha, 0x5042,
3092 "Received unknown response pkt type %x "
3093 "entry status=%x.\n",
3094 pkt->entry_type, pkt->entry_status);
3095 break;
3096 }
3097 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
3098 wmb();
3099 }
3100
3101
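/* Adjust ring index */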
3102 if (IS_P3P_TYPE(ha)) {
3103 struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
3104
3105 WRT_REG_DWORD(&reg->rsp_q_out[0], rsp->ring_index);
3106 } else {
3107 WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
3108 }
3109 }
3110
3111 static void
3112 qla2xxx_check_risc_status(scsi_qla_host_t *vha)
3113 {
3114 int rval;
3115 uint32_t cnt;
3116 struct qla_hw_data *ha = vha->hw;
3117 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
3118
3119 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
3120 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
3121 return;
3122
3123 rval = QLA_SUCCESS;
3124 WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
3125 RD_REG_DWORD(&reg->iobase_addr);
3126 WRT_REG_DWORD(&reg->iobase_window, 0x0001);
3127 for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
3128 rval == QLA_SUCCESS; cnt--) {
3129 if (cnt) {
3130 WRT_REG_DWORD(&reg->iobase_window, 0x0001);
3131 udelay(10);
3132 } else
3133 rval = QLA_FUNCTION_TIMEOUT;
3134 }
3135 if (rval == QLA_SUCCESS)
3136 goto next_test;
3137
3138 rval = QLA_SUCCESS;
3139 WRT_REG_DWORD(&reg->iobase_window, 0x0003);
3140 for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
3141 rval == QLA_SUCCESS; cnt--) {
3142 if (cnt) {
3143 WRT_REG_DWORD(&reg->iobase_window, 0x0003);
3144 udelay(10);
3145 } else
3146 rval = QLA_FUNCTION_TIMEOUT;
3147 }
3148 if (rval != QLA_SUCCESS)
3149 goto done;
3150
3151 next_test:
3152 if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
3153 ql_log(ql_log_info, vha, 0x504c,
3154 "Additional code -- 0x55AA.\n");
3155
3156 done:
3157 WRT_REG_DWORD(&reg->iobase_window, 0x0000);
3158 RD_REG_DWORD(&reg->iobase_window);
3159 }
3160
3161
3162
3163
3164
3165
3166
3167
3168
3169
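/**
 * qla24xx_intr_handler() - Process interrupts for the ISP23xx and ISP24xx.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */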
3170 irqreturn_t
3171 qla24xx_intr_handler(int irq, void *dev_id)
3172 {
3173 scsi_qla_host_t *vha;
3174 struct qla_hw_data *ha;
3175 struct device_reg_24xx __iomem *reg;
3176 int status;
3177 unsigned long iter;
3178 uint32_t stat;
3179 uint32_t hccr;
3180 uint16_t mb[8];
3181 struct rsp_que *rsp;
3182 unsigned long flags;
3183 bool process_atio = false;
3184
3185 rsp = (struct rsp_que *) dev_id;
3186 if (!rsp) {
3187 ql_log(ql_log_info, NULL, 0x5059,
3188 "%s: NULL response queue pointer.\n", __func__);
3189 return IRQ_NONE;
3190 }
3191
3192 ha = rsp->hw;
3193 reg = &ha->iobase->isp24;
3194 status = 0;
3195
3196 if (unlikely(pci_channel_offline(ha->pdev)))
3197 return IRQ_HANDLED;
3198
3199 spin_lock_irqsave(&ha->hardware_lock, flags);
3200 vha = pci_get_drvdata(ha->pdev);
3201 for (iter = 50; iter--; ) {
3202 stat = RD_REG_DWORD(&reg->host_status);
3203 if (qla2x00_check_reg32_for_disconnect(vha, stat))
3204 break;
3205 if (stat & HSRX_RISC_PAUSED) {
3206 if (unlikely(pci_channel_offline(ha->pdev)))
3207 break;
3208
3209 hccr = RD_REG_DWORD(&reg->hccr);
3210
3211 ql_log(ql_log_warn, vha, 0x504b,
3212 "RISC paused -- HCCR=%x, Dumping firmware.\n",
3213 hccr);
3214
3215 qla2xxx_check_risc_status(vha);
3216
3217 ha->isp_ops->fw_dump(vha, 1);
3218 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3219 break;
3220 } else if ((stat & HSRX_RISC_INT) == 0)
3221 break;
3222
3223 switch (stat & 0xff) {
3224 case INTR_ROM_MB_SUCCESS:
3225 case INTR_ROM_MB_FAILED:
3226 case INTR_MB_SUCCESS:
3227 case INTR_MB_FAILED:
3228 qla24xx_mbx_completion(vha, MSW(stat));
3229 status |= MBX_INTERRUPT;
3230
3231 break;
3232 case INTR_ASYNC_EVENT:
3233 mb[0] = MSW(stat);
3234 mb[1] = RD_REG_WORD(&reg->mailbox1);
3235 mb[2] = RD_REG_WORD(&reg->mailbox2);
3236 mb[3] = RD_REG_WORD(&reg->mailbox3);
3237 qla2x00_async_event(vha, rsp, mb);
3238 break;
3239 case INTR_RSP_QUE_UPDATE:
3240 case INTR_RSP_QUE_UPDATE_83XX:
3241 qla24xx_process_response_queue(vha, rsp);
3242 break;
3243 case INTR_ATIO_QUE_UPDATE_27XX:
3244 case INTR_ATIO_QUE_UPDATE:
3245 process_atio = true;
3246 break;
3247 case INTR_ATIO_RSP_QUE_UPDATE:
3248 process_atio = true;
3249 qla24xx_process_response_queue(vha, rsp);
3250 break;
3251 default:
3252 ql_dbg(ql_dbg_async, vha, 0x504f,
3253 "Unrecognized interrupt type (%d).\n", stat * 0xff);
3254 break;
3255 }
3256 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
3257 RD_REG_DWORD_RELAXED(&reg->hccr);
3258 if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1)))
3259 ndelay(3500);
3260 }
3261 qla2x00_handle_mbx_completion(ha, status);
3262 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3263
3264 if (process_atio) {
3265 spin_lock_irqsave(&ha->tgt.atio_lock, flags);
3266 qlt_24xx_process_atio_queue(vha, 0);
3267 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
3268 }
3269
3270 return IRQ_HANDLED;
3271 }
3272
3273 static irqreturn_t
3274 qla24xx_msix_rsp_q(int irq, void *dev_id)
3275 {
3276 struct qla_hw_data *ha;
3277 struct rsp_que *rsp;
3278 struct device_reg_24xx __iomem *reg;
3279 struct scsi_qla_host *vha;
3280 unsigned long flags;
3281
3282 rsp = (struct rsp_que *) dev_id;
3283 if (!rsp) {
3284 ql_log(ql_log_info, NULL, 0x505a,
3285 "%s: NULL response queue pointer.\n", __func__);
3286 return IRQ_NONE;
3287 }
3288 ha = rsp->hw;
3289 reg = &ha->iobase->isp24;
3290
3291 spin_lock_irqsave(&ha->hardware_lock, flags);
3292
3293 vha = pci_get_drvdata(ha->pdev);
3294 qla24xx_process_response_queue(vha, rsp);
3295 if (!ha->flags.disable_msix_handshake) {
3296 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
3297 RD_REG_DWORD_RELAXED(&reg->hccr);
3298 }
3299 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3300
3301 return IRQ_HANDLED;
3302 }
3303
3304 static irqreturn_t
3305 qla24xx_msix_default(int irq, void *dev_id)
3306 {
3307 scsi_qla_host_t *vha;
3308 struct qla_hw_data *ha;
3309 struct rsp_que *rsp;
3310 struct device_reg_24xx __iomem *reg;
3311 int status;
3312 uint32_t stat;
3313 uint32_t hccr;
3314 uint16_t mb[8];
3315 unsigned long flags;
3316 bool process_atio = false;
3317
3318 rsp = (struct rsp_que *) dev_id;
3319 if (!rsp) {
3320 ql_log(ql_log_info, NULL, 0x505c,
3321 "%s: NULL response queue pointer.\n", __func__);
3322 return IRQ_NONE;
3323 }
3324 ha = rsp->hw;
3325 reg = &ha->iobase->isp24;
3326 status = 0;
3327
3328 spin_lock_irqsave(&ha->hardware_lock, flags);
3329 vha = pci_get_drvdata(ha->pdev);
3330 do {
3331 stat = RD_REG_DWORD(&reg->host_status);
3332 if (qla2x00_check_reg32_for_disconnect(vha, stat))
3333 break;
3334 if (stat & HSRX_RISC_PAUSED) {
3335 if (unlikely(pci_channel_offline(ha->pdev)))
3336 break;
3337
3338 hccr = RD_REG_DWORD(&reg->hccr);
3339
3340 ql_log(ql_log_info, vha, 0x5050,
3341 "RISC paused -- HCCR=%x, Dumping firmware.\n",
3342 hccr);
3343
3344 qla2xxx_check_risc_status(vha);
3345
3346 ha->isp_ops->fw_dump(vha, 1);
3347 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3348 break;
3349 } else if ((stat & HSRX_RISC_INT) == 0)
3350 break;
3351
3352 switch (stat & 0xff) {
3353 case INTR_ROM_MB_SUCCESS:
3354 case INTR_ROM_MB_FAILED:
3355 case INTR_MB_SUCCESS:
3356 case INTR_MB_FAILED:
3357 qla24xx_mbx_completion(vha, MSW(stat));
3358 status |= MBX_INTERRUPT;
3359
3360 break;
3361 case INTR_ASYNC_EVENT:
3362 mb[0] = MSW(stat);
3363 mb[1] = RD_REG_WORD(&reg->mailbox1);
3364 mb[2] = RD_REG_WORD(&reg->mailbox2);
3365 mb[3] = RD_REG_WORD(&reg->mailbox3);
3366 qla2x00_async_event(vha, rsp, mb);
3367 break;
3368 case INTR_RSP_QUE_UPDATE:
3369 case INTR_RSP_QUE_UPDATE_83XX:
3370 qla24xx_process_response_queue(vha, rsp);
3371 break;
3372 case INTR_ATIO_QUE_UPDATE_27XX:
3373 case INTR_ATIO_QUE_UPDATE:
3374 process_atio = true;
3375 break;
3376 case INTR_ATIO_RSP_QUE_UPDATE:
3377 process_atio = true;
3378 qla24xx_process_response_queue(vha, rsp);
3379 break;
3380 default:
3381 ql_dbg(ql_dbg_async, vha, 0x5051,
3382 "Unrecognized interrupt type (%d).\n", stat & 0xff);
3383 break;
3384 }
3385 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
3386 } while (0);
3387 qla2x00_handle_mbx_completion(ha, status);
3388 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3389
3390 if (process_atio) {
3391 spin_lock_irqsave(&ha->tgt.atio_lock, flags);
3392 qlt_24xx_process_atio_queue(vha, 0);
3393 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
3394 }
3395
3396 return IRQ_HANDLED;
3397 }
3398
3399 irqreturn_t
3400 qla2xxx_msix_rsp_q(int irq, void *dev_id)
3401 {
3402 struct qla_hw_data *ha;
3403 struct qla_qpair *qpair;
3404 struct device_reg_24xx __iomem *reg;
3405 unsigned long flags;
3406
3407 qpair = dev_id;
3408 if (!qpair) {
3409 ql_log(ql_log_info, NULL, 0x505b,
3410 "%s: NULL response queue pointer.\n", __func__);
3411 return IRQ_NONE;
3412 }
3413 ha = qpair->hw;
3414
3415
3416 if (unlikely(!ha->flags.disable_msix_handshake)) {
3417 reg = &ha->iobase->isp24;
3418 spin_lock_irqsave(&ha->hardware_lock, flags);
3419 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
3420 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3421 }
3422
3423 queue_work(ha->wq, &qpair->q_work);
3424
3425 return IRQ_HANDLED;
3426 }
3427
3428
3429
3430 struct qla_init_msix_entry {
3431 const char *name;
3432 irq_handler_t handler;
3433 };
3434
3435 static const struct qla_init_msix_entry msix_entries[] = {
3436 { "default", qla24xx_msix_default },
3437 { "rsp_q", qla24xx_msix_rsp_q },
3438 { "atio_q", qla83xx_msix_atio_q },
3439 { "qpair_multiq", qla2xxx_msix_rsp_q },
3440 };
3441
3442 static const struct qla_init_msix_entry qla82xx_msix_entries[] = {
3443 { "qla2xxx (default)", qla82xx_msix_default },
3444 { "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
3445 };
3446
3447 static int
3448 qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
3449 {
3450 int i, ret;
3451 struct qla_msix_entry *qentry;
3452 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
3453 int min_vecs = QLA_BASE_VECTORS;
3454 struct irq_affinity desc = {
3455 .pre_vectors = QLA_BASE_VECTORS,
3456 };
3457
3458 if (QLA_TGT_MODE_ENABLED() && (ql2xenablemsix != 0) &&
3459 IS_ATIO_MSIX_CAPABLE(ha)) {
3460 desc.pre_vectors++;
3461 min_vecs++;
3462 }
3463
3464 if (USER_CTRL_IRQ(ha) || !ha->mqiobase) {
3465
3466 ret = pci_alloc_irq_vectors(ha->pdev, min_vecs,
3467 ha->msix_count, PCI_IRQ_MSIX);
3468 } else
3469 ret = pci_alloc_irq_vectors_affinity(ha->pdev, min_vecs,
3470 ha->msix_count, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
3471 &desc);
3472
3473 if (ret < 0) {
3474 ql_log(ql_log_fatal, vha, 0x00c7,
3475 "MSI-X: Failed to enable support, "
3476 "giving up -- %d/%d.\n",
3477 ha->msix_count, ret);
3478 goto msix_out;
3479 } else if (ret < ha->msix_count) {
3480 ql_log(ql_log_info, vha, 0x00c6,
3481 "MSI-X: Using %d vectors\n", ret);
3482 ha->msix_count = ret;
3483
3484 if (ha->mqiobase && (ql2xmqsupport || ql2xnvmeenable)) {
3485 ha->max_req_queues = ha->msix_count - 1;
3486
3487
3488 if (QLA_TGT_MODE_ENABLED())
3489 ha->max_req_queues--;
3490
3491 ha->max_rsp_queues = ha->max_req_queues;
3492
3493 ha->max_qpairs = ha->max_req_queues - 1;
3494 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0190,
3495 "Adjusted Max no of queues pairs: %d.\n", ha->max_qpairs);
3496 }
3497 }
3498 vha->irq_offset = desc.pre_vectors;
3499 ha->msix_entries = kcalloc(ha->msix_count,
3500 sizeof(struct qla_msix_entry),
3501 GFP_KERNEL);
3502 if (!ha->msix_entries) {
3503 ql_log(ql_log_fatal, vha, 0x00c8,
3504 "Failed to allocate memory for ha->msix_entries.\n");
3505 ret = -ENOMEM;
3506 goto free_irqs;
3507 }
3508 ha->flags.msix_enabled = 1;
3509
3510 for (i = 0; i < ha->msix_count; i++) {
3511 qentry = &ha->msix_entries[i];
3512 qentry->vector = pci_irq_vector(ha->pdev, i);
3513 qentry->entry = i;
3514 qentry->have_irq = 0;
3515 qentry->in_use = 0;
3516 qentry->handle = NULL;
3517 }
3518
3519
3520 for (i = 0; i < QLA_BASE_VECTORS; i++) {
3521 qentry = &ha->msix_entries[i];
3522 qentry->handle = rsp;
3523 rsp->msix = qentry;
3524 scnprintf(qentry->name, sizeof(qentry->name),
3525 "qla2xxx%lu_%s", vha->host_no, msix_entries[i].name);
3526 if (IS_P3P_TYPE(ha))
3527 ret = request_irq(qentry->vector,
3528 qla82xx_msix_entries[i].handler,
3529 0, qla82xx_msix_entries[i].name, rsp);
3530 else
3531 ret = request_irq(qentry->vector,
3532 msix_entries[i].handler,
3533 0, qentry->name, rsp);
3534 if (ret)
3535 goto msix_register_fail;
3536 qentry->have_irq = 1;
3537 qentry->in_use = 1;
3538 }
3539
3540
3541
3542
3543
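/*
 * If target mode is enabled, also request the vector for the ATIO
 * queue.
 */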
3544 if (QLA_TGT_MODE_ENABLED() && (ql2xenablemsix != 0) &&
3545 IS_ATIO_MSIX_CAPABLE(ha)) {
3546 qentry = &ha->msix_entries[QLA_ATIO_VECTOR];
3547 rsp->msix = qentry;
3548 qentry->handle = rsp;
3549 scnprintf(qentry->name, sizeof(qentry->name),
3550 "qla2xxx%lu_%s", vha->host_no,
3551 msix_entries[QLA_ATIO_VECTOR].name);
3552 qentry->in_use = 1;
3553 ret = request_irq(qentry->vector,
3554 msix_entries[QLA_ATIO_VECTOR].handler,
3555 0, qentry->name, rsp);
3556 qentry->have_irq = 1;
3557 }
3558
3559 msix_register_fail:
3560 if (ret) {
3561 ql_log(ql_log_fatal, vha, 0x00cb,
3562 "MSI-X: unable to register handler -- %x/%d.\n",
3563 qentry->vector, ret);
3564 qla2x00_free_irqs(vha);
3565 ha->mqenable = 0;
3566 goto msix_out;
3567 }
3568
3569
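/* Enable MSI-X vector for response queue update for queue 0 */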
3570 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
3571 if (ha->msixbase && ha->mqiobase &&
3572 (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
3573 ql2xmqsupport))
3574 ha->mqenable = 1;
3575 } else
3576 if (ha->mqiobase &&
3577 (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
3578 ql2xmqsupport))
3579 ha->mqenable = 1;
3580 ql_dbg(ql_dbg_multiq, vha, 0xc005,
3581 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
3582 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
3583 ql_dbg(ql_dbg_init, vha, 0x0055,
3584 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
3585 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
3586
3587 msix_out:
3588 return ret;
3589
3590 free_irqs:
3591 pci_free_irq_vectors(ha->pdev);
3592 goto msix_out;
3593 }
3594
3595 int
3596 qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
3597 {
3598 int ret = QLA_FUNCTION_FAILED;
3599 device_reg_t *reg = ha->iobase;
3600 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
3601
3602
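/* If possible, enable MSI-X. */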
3603 if (ql2xenablemsix == 0 || (!IS_QLA2432(ha) && !IS_QLA2532(ha) &&
3604 !IS_QLA8432(ha) && !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) &&
3605 !IS_QLAFX00(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)))
3606 goto skip_msi;
3607
3608 if (ql2xenablemsix == 2)
3609 goto skip_msix;
3610
3611 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
3612 (ha->pdev->subsystem_device == 0x7040 ||
3613 ha->pdev->subsystem_device == 0x7041 ||
3614 ha->pdev->subsystem_device == 0x1705)) {
3615 ql_log(ql_log_warn, vha, 0x0034,
3616 "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n",
3617 ha->pdev->subsystem_vendor,
3618 ha->pdev->subsystem_device);
3619 goto skip_msi;
3620 }
3621
3622 if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) {
3623 ql_log(ql_log_warn, vha, 0x0035,
3624 "MSI-X; Unsupported ISP2432 (0x%X, 0x%X).\n",
3625 ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX);
3626 goto skip_msix;
3627 }
3628
3629 ret = qla24xx_enable_msix(ha, rsp);
3630 if (!ret) {
3631 ql_dbg(ql_dbg_init, vha, 0x0036,
3632 "MSI-X: Enabled (0x%X, 0x%X).\n",
3633 ha->chip_revision, ha->fw_attributes);
3634 goto clear_risc_ints;
3635 }
3636
3637 skip_msix:
3638
3639 ql_log(ql_log_info, vha, 0x0037,
3640 "Falling back-to MSI mode -- ret=%d.\n", ret);
3641
3642 if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
3643 !IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha) &&
3644 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
3645 goto skip_msi;
3646
3647 ret = pci_alloc_irq_vectors(ha->pdev, 1, 1, PCI_IRQ_MSI);
3648 if (ret > 0) {
3649 ql_dbg(ql_dbg_init, vha, 0x0038,
3650 "MSI: Enabled.\n");
3651 ha->flags.msi_enabled = 1;
3652 } else
3653 ql_log(ql_log_warn, vha, 0x0039,
3654 "Falling back-to INTa mode -- ret=%d.\n", ret);
3655 skip_msi:
3656
3657
3658 if (!ha->flags.msi_enabled && IS_QLA82XX(ha))
3659 return QLA_FUNCTION_FAILED;
3660
3661 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
3662 ha->flags.msi_enabled ? 0 : IRQF_SHARED,
3663 QLA2XXX_DRIVER_NAME, rsp);
3664 if (ret) {
3665 ql_log(ql_log_warn, vha, 0x003a,
3666 "Failed to reserve interrupt %d already in use.\n",
3667 ha->pdev->irq);
3668 goto fail;
3669 } else if (!ha->flags.msi_enabled) {
3670 ql_dbg(ql_dbg_init, vha, 0x0125,
3671 "INTa mode: Enabled.\n");
3672 ha->flags.mr_intr_valid = 1;
3673 }
3674
3675 clear_risc_ints:
3676 if (IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha))
3677 goto fail;
3678
3679 spin_lock_irq(&ha->hardware_lock);
3680 WRT_REG_WORD(&reg->isp.semaphore, 0);
3681 spin_unlock_irq(&ha->hardware_lock);
3682
3683 fail:
3684 return ret;
3685 }
3686
3687 void
3688 qla2x00_free_irqs(scsi_qla_host_t *vha)
3689 {
3690 struct qla_hw_data *ha = vha->hw;
3691 struct rsp_que *rsp;
3692 struct qla_msix_entry *qentry;
3693 int i;
3694
3695
3696
3697
3698
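/*
 * We need to check that ha->rsp_q_map is valid in case we are called
 * from a probe failure context that can occur after RISC dump.
 */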
3699 if (!ha->rsp_q_map || !ha->rsp_q_map[0])
3700 goto free_irqs;
3701 rsp = ha->rsp_q_map[0];
3702
3703 if (ha->flags.msix_enabled) {
3704 for (i = 0; i < ha->msix_count; i++) {
3705 qentry = &ha->msix_entries[i];
3706 if (qentry->have_irq) {
3707 irq_set_affinity_notifier(qentry->vector, NULL);
3708 free_irq(pci_irq_vector(ha->pdev, i), qentry->handle);
3709 }
3710 }
3711 kfree(ha->msix_entries);
3712 ha->msix_entries = NULL;
3713 ha->flags.msix_enabled = 0;
3714 ql_dbg(ql_dbg_init, vha, 0x0042,
3715 "Disabled MSI-X.\n");
3716 } else {
3717 free_irq(pci_irq_vector(ha->pdev, 0), rsp);
3718 }
3719
3720 free_irqs:
3721 pci_free_irq_vectors(ha->pdev);
3722 }
3723
3724 int qla25xx_request_irq(struct qla_hw_data *ha, struct qla_qpair *qpair,
3725 struct qla_msix_entry *msix, int vector_type)
3726 {
3727 const struct qla_init_msix_entry *intr = &msix_entries[vector_type];
3728 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
3729 int ret;
3730
3731 scnprintf(msix->name, sizeof(msix->name),
3732 "qla2xxx%lu_qpair%d", vha->host_no, qpair->id);
3733 ret = request_irq(msix->vector, intr->handler, 0, msix->name, qpair);
3734 if (ret) {
3735 ql_log(ql_log_fatal, vha, 0x00e6,
3736 "MSI-X: Unable to register handler -- %x/%d.\n",
3737 msix->vector, ret);
3738 return ret;
3739 }
3740 msix->have_irq = 1;
3741 msix->handle = qpair;
3742 return ret;
3743 }