This source file includes the following definitions:
- lpfc_rport_data_from_scsi_device
- lpfc_cmd_blksize
- lpfc_cmd_protect
- lpfc_cmd_guard_csum
- lpfc_sli4_set_rsp_sgl_last
- lpfc_update_stats
- lpfc_rampdown_queue_depth
- lpfc_ramp_down_queue_handler
- lpfc_scsi_dev_block
- lpfc_new_scsi_buf_s3
- lpfc_sli4_vport_delete_fcp_xri_aborted
- lpfc_sli4_io_xri_aborted
- lpfc_get_scsi_buf_s3
- lpfc_get_scsi_buf_s4
- lpfc_get_scsi_buf
- lpfc_release_scsi_buf_s3
- lpfc_release_scsi_buf_s4
- lpfc_release_scsi_buf
- lpfc_scsi_prep_dma_buf_s3
- lpfc_bg_err_inject
- lpfc_sc_to_bg_opcodes
- lpfc_bg_err_opcodes
- lpfc_bg_setup_bpl
- lpfc_bg_setup_bpl_prot
- lpfc_bg_setup_sgl
- lpfc_bg_setup_sgl_prot
- lpfc_prot_group_type
- lpfc_bg_scsi_adjust_dl
- lpfc_bg_scsi_prep_dma_buf_s3
- lpfc_bg_crc
- lpfc_bg_csum
- lpfc_calc_bg_err
- lpfc_parse_bg_err
- lpfc_scsi_prep_dma_buf_s4
- lpfc_bg_scsi_prep_dma_buf_s4
- lpfc_scsi_prep_dma_buf
- lpfc_bg_scsi_prep_dma_buf
- lpfc_send_scsi_error_event
- lpfc_scsi_unprep_dma_buf
- lpfc_handle_fcp_err
- lpfc_scsi_cmd_iocb_cmpl
- lpfc_fcpcmd_to_iocb
- lpfc_scsi_prep_cmnd
- lpfc_scsi_prep_task_mgmt_cmd
- lpfc_scsi_api_table_setup
- lpfc_tskmgmt_def_cmpl
- lpfc_check_pci_resettable
- lpfc_info
- lpfc_poll_rearm_timer
- lpfc_poll_start_timer
- lpfc_poll_timeout
- lpfc_queuecommand
- lpfc_abort_handler
- lpfc_taskmgmt_name
- lpfc_check_fcp_rsp
- lpfc_send_taskmgmt
- lpfc_chk_tgt_mapped
- lpfc_reset_flush_io_context
- lpfc_device_reset_handler
- lpfc_target_reset_handler
- lpfc_bus_reset_handler
- lpfc_host_reset_handler
- lpfc_slave_alloc
- lpfc_slave_configure
- lpfc_slave_destroy
- lpfc_create_device_data
- lpfc_delete_device_data
- __lpfc_get_device_data
- lpfc_find_next_oas_lun
- lpfc_enable_oas_lun
- lpfc_disable_oas_lun
- lpfc_no_command
- lpfc_no_handler
- lpfc_no_slave

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <asm/unaligned.h>
#include <linux/t10-pi.h>
#include <linux/crc-t10dif.h>
#include <net/checksum.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_version.h"
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"

#define LPFC_RESET_WAIT  2
#define LPFC_ABORT_WAIT  2

static char *dif_op_str[] = {
	"PROT_NORMAL",
	"PROT_READ_INSERT",
	"PROT_WRITE_STRIP",
	"PROT_READ_STRIP",
	"PROT_WRITE_INSERT",
	"PROT_READ_PASS",
	"PROT_WRITE_PASS",
};

struct scsi_dif_tuple {
	__be16 guard_tag;	/* Checksum */
	__be16 app_tag;		/* Opaque storage */
	__be32 ref_tag;		/* Target LBA or indirect LBA */
};

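/*
 * lpfc_rport_data_from_scsi_device - Return the rport data for a SCSI device.
 *
 * When the HBA has cfg_fof (OAS) enabled, sdev->hostdata is a
 * struct lpfc_device_data and the rport data hangs off of it;
 * otherwise sdev->hostdata is the struct lpfc_rport_data itself.
 */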
static struct lpfc_rport_data *
lpfc_rport_data_from_scsi_device(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)sdev->host->hostdata;

	if (vport->phba->cfg_fof)
		return ((struct lpfc_device_data *)sdev->hostdata)->rport_data;
	else
		return (struct lpfc_rport_data *)sdev->hostdata;
}

static void
lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb);
static void
lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb);
static int
lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc);

static inline unsigned
lpfc_cmd_blksize(struct scsi_cmnd *sc)
{
	return sc->device->sector_size;
}

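/*
 * Flags for lpfc_cmd_protect(): request guard-tag or reference-tag checking
 * for a command. The current implementation always asks the hardware to
 * check both, so lpfc_cmd_protect() simply returns 1.
 */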
#define LPFC_CHECK_PROTECT_GUARD	1
#define LPFC_CHECK_PROTECT_REF		2
static inline unsigned
lpfc_cmd_protect(struct scsi_cmnd *sc, int flag)
{
	return 1;
}

static inline unsigned
lpfc_cmd_guard_csum(struct scsi_cmnd *sc)
{
	if (lpfc_prot_group_type(NULL, sc) == LPFC_PG_TYPE_NO_DIF)
		return 0;
	if (scsi_host_get_guard(sc->device->host) == SHOST_DIX_GUARD_IP)
		return 1;
	return 0;
}

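/**
 * lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge.
 * @phba: Pointer to HBA object.
 * @lpfc_cmd: lpfc scsi command object pointer.
 *
 * This function is called from the lpfc_scsi_prep_task_mgmt_cmd path to
 * set the last bit in the response sge entry.
 **/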
static void
lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
			   struct lpfc_io_buf *lpfc_cmd)
{
	struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;

	if (sgl) {
		sgl += 1;
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
	}
}

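/**
 * lpfc_update_stats - Update statistical data for the command completion
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: lpfc scsi command object pointer.
 *
 * This function updates the per-node latency statistics when a SCSI command
 * completes, bucketing the measured latency either linearly or by powers of
 * two depending on the configured bucket type.
 **/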
static void
lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *pnode;
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	unsigned long flags;
	struct Scsi_Host *shost = cmd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	unsigned long latency;
	int i;

	if (!vport->stat_data_enabled ||
	    vport->stat_data_blocked ||
	    (cmd->result))
		return;

	latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);
	rdata = lpfc_cmd->rdata;
	pnode = rdata->pnode;

	spin_lock_irqsave(shost->host_lock, flags);
	if (!pnode ||
	    !pnode->lat_data ||
	    (phba->bucket_type == LPFC_NO_BUCKET)) {
		spin_unlock_irqrestore(shost->host_lock, flags);
		return;
	}

	if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
		i = (latency + phba->bucket_step - 1 - phba->bucket_base) /
			phba->bucket_step;
		/* check array subscript bounds */
		if (i < 0)
			i = 0;
		else if (i >= LPFC_MAX_BUCKET_COUNT)
			i = LPFC_MAX_BUCKET_COUNT - 1;
	} else {
		for (i = 0; i < LPFC_MAX_BUCKET_COUNT - 1; i++)
			if (latency <= (phba->bucket_base +
				((1 << i) * phba->bucket_step)))
				break;
	}

	pnode->lat_data[i].cmd_count++;
	spin_unlock_irqrestore(shost->host_lock, flags);
}

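/**
 * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called when there is a resource error in the driver or
 * firmware. It posts a WORKER_RAMP_DOWN_QUEUE event for @phba at most once
 * per QUEUE_RAMP_DOWN_INTERVAL and wakes up the worker thread to process it.
 *
 * This routine should be called with no lock held.
 **/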
void
lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
{
	unsigned long flags;
	uint32_t evt_posted;
	unsigned long expires;

	spin_lock_irqsave(&phba->hbalock, flags);
	atomic_inc(&phba->num_rsrc_err);
	phba->last_rsrc_error_time = jiffies;

	expires = phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL;
	if (time_after(expires, jiffies)) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}

	phba->last_ramp_down_time = jiffies;

	spin_unlock_irqrestore(&phba->hbalock, flags);

	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
	if (!evt_posted)
		phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	if (!evt_posted)
		lpfc_worker_wake_up(phba);
}

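/**
 * lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called by the worker thread to process the
 * WORKER_RAMP_DOWN_QUEUE event. It reduces the queue depth of every SCSI
 * device on each vport associated with @phba in proportion to the number
 * of resource errors seen.
 **/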
void
lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;
	unsigned long new_queue_depth;
	unsigned long num_rsrc_err, num_cmd_success;
	int i;

	num_rsrc_err = atomic_read(&phba->num_rsrc_err);
	num_cmd_success = atomic_read(&phba->num_cmd_success);

	/*
	 * The error and success counters are global per driver instance.
	 * If another handler has already cleared the resource-error count,
	 * there is nothing left to ramp down.
	 */
	if (num_rsrc_err == 0)
		return;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				new_queue_depth =
					sdev->queue_depth * num_rsrc_err /
					(num_rsrc_err + num_cmd_success);
				if (!new_queue_depth)
					new_queue_depth = sdev->queue_depth - 1;
				else
					new_queue_depth = sdev->queue_depth -
								new_queue_depth;
				scsi_change_queue_depth(sdev, new_queue_depth);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
	atomic_set(&phba->num_rsrc_err, 0);
	atomic_set(&phba->num_cmd_success, 0);
}

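/**
 * lpfc_scsi_dev_block - set all scsi hosts to block state
 * @phba: Pointer to HBA context object.
 *
 * This function walks the vport list and sets each SCSI host to block state
 * by invoking fc_remote_port_delete() for every remote port. It is invoked
 * by EEH when the device's PCI slot has been permanently disabled.
 **/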
void
lpfc_scsi_dev_block(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;
	struct fc_rport *rport;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				rport = starget_to_rport(scsi_target(sdev));
				fc_remote_port_delete(rport);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
}

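/**
 * lpfc_new_scsi_buf_s3 - Scsi buffer allocator for scsi devices
 * @vport: The virtual port for which this call is being executed.
 * @num_to_alloc: The requested number of buffers to allocate.
 *
 * This routine allocates scsi buffers for a device with SLI-3 interface
 * spec. Each buffer contains all the information needed to initiate a SCSI
 * I/O: the DMAable region holds the FCP CMND, FCP RSP and the initial BPL,
 * and the FCP CMND/RSP BDEs are set up in the BPL and the IOCB here.
 *
 * Return codes:
 *   int - number of scsi buffers that were allocated.
 *   0 = failure, less than num_to_alloc is a partial failure.
 **/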
static int
lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_io_buf *psb;
	struct ulp_bde64 *bpl;
	IOCB_t *iocb;
	dma_addr_t pdma_phys_fcp_cmd;
	dma_addr_t pdma_phys_fcp_rsp;
	dma_addr_t pdma_phys_sgl;
	uint16_t iotag;
	int bcnt, bpl_size;

	bpl_size = phba->cfg_sg_dma_buf_size -
		(sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));

	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			 "9067 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n",
			 num_to_alloc, phba->cfg_sg_dma_buf_size,
			 (int)sizeof(struct fcp_cmnd),
			 (int)sizeof(struct fcp_rsp), bpl_size);

	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
		psb = kzalloc(sizeof(struct lpfc_io_buf), GFP_KERNEL);
		if (!psb)
			break;

		/*
		 * Get memory from the pci pool to map the virt space to pci
		 * bus space for an I/O. The DMA buffer includes space for the
		 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
		 * necessary to support the sg_tablesize.
		 */
		psb->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
					    GFP_KERNEL, &psb->dma_handle);
		if (!psb->data) {
			kfree(psb);
			break;
		}

		/* Allocate iotag for psb->cur_iocbq. */
		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
		if (iotag == 0) {
			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
				      psb->data, psb->dma_handle);
			kfree(psb);
			break;
		}
		psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;

		psb->fcp_cmnd = psb->data;
		psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
		psb->dma_sgl = psb->data + sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp);

		/* Initialize local short-hand pointers. */
		bpl = (struct ulp_bde64 *)psb->dma_sgl;
		pdma_phys_fcp_cmd = psb->dma_handle;
		pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
		pdma_phys_sgl = psb->dma_handle + sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp);

		/*
		 * The first two BDEs are the FCP_CMD and FCP_RSP. The balance
		 * are sg list bdes. Initialize the first two and leave the
		 * rest for queuecommand.
		 */
		bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
		bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
		bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
		bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);

		/* Setup the physical region for the FCP RSP */
		bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
		bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
		bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
		bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);

		/*
		 * Since the IOCB for the FCP I/O is built into this
		 * lpfc_scsi_buf, initialize it with all known data now.
		 */
		iocb = &psb->cur_iocbq.iocb;
		iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
		if ((phba->sli_rev == 3) &&
		    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
			/* fill in immediate fcp command BDE */
			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
			iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
			iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
					unsli3.fcp_ext.icd);
			iocb->un.fcpi64.bdl.addrHigh = 0;
			iocb->ulpBdeCount = 0;
			iocb->ulpLe = 0;
			/* fill in response BDE */
			iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags =
							BUFF_TYPE_BDE_64;
			iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
							sizeof(struct fcp_rsp);
			iocb->unsli3.fcp_ext.rbde.addrLow =
					putPaddrLow(pdma_phys_fcp_rsp);
			iocb->unsli3.fcp_ext.rbde.addrHigh =
					putPaddrHigh(pdma_phys_fcp_rsp);
		} else {
			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
			iocb->un.fcpi64.bdl.bdeSize =
					(2 * sizeof(struct ulp_bde64));
			iocb->un.fcpi64.bdl.addrLow =
					putPaddrLow(pdma_phys_sgl);
			iocb->un.fcpi64.bdl.addrHigh =
					putPaddrHigh(pdma_phys_sgl);
			iocb->ulpBdeCount = 1;
			iocb->ulpLe = 1;
		}
		iocb->ulpClass = CLASS3;
		psb->status = IOSTAT_SUCCESS;
		/* Put it back into the SCSI buffer list */
		psb->cur_iocbq.context1 = psb;
		spin_lock_init(&psb->buf_lock);
		lpfc_release_scsi_buf_s3(phba, psb);
	}

	return bcnt;
}

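/**
 * lpfc_sli4_vport_delete_fcp_xri_aborted - Remove all ndlp references for vport
 * @vport: pointer to lpfc vport data structure.
 *
 * This routine is invoked by the vport cleanup for deletions and the cleanup
 * for an ndlp on removal. It clears rdata on any aborted FCP I/O buffers
 * that still reference the vport.
 **/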
void
lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_io_buf *psb, *next_psb;
	struct lpfc_sli4_hdw_queue *qp;
	unsigned long iflag = 0;
	int idx;

	if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
		return;

	spin_lock_irqsave(&phba->hbalock, iflag);
	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
		qp = &phba->sli4_hba.hdwq[idx];

		spin_lock(&qp->abts_io_buf_list_lock);
		list_for_each_entry_safe(psb, next_psb,
					 &qp->lpfc_abts_io_buf_list, list) {
			if (psb->cur_iocbq.iocb_flag & LPFC_IO_NVME)
				continue;

			if (psb->rdata && psb->rdata->pnode &&
			    psb->rdata->pnode->vport == vport)
				psb->rdata = NULL;
		}
		spin_unlock(&qp->abts_io_buf_list_lock);
	}
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}

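/**
 * lpfc_sli4_io_xri_aborted - Fast-path process of fcp xri abort
 * @phba: pointer to lpfc hba data structure.
 * @axri: pointer to the fcp xri abort wcqe structure.
 * @idx: index into hdwq
 *
 * This routine is invoked by the worker thread to process a SLI4 fast-path
 * FCP or NVME aborted xri. It releases the matching I/O buffer back to its
 * buffer list.
 **/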
void
lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
			 struct sli4_wcqe_xri_aborted *axri, int idx)
{
	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
	struct lpfc_io_buf *psb, *next_psb;
	struct lpfc_sli4_hdw_queue *qp;
	unsigned long iflag = 0;
	struct lpfc_iocbq *iocbq;
	int i;
	struct lpfc_nodelist *ndlp;
	int rrq_empty = 0;
	struct lpfc_sli_ring *pring = phba->sli4_hba.els_wq->pring;

	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
		return;

	qp = &phba->sli4_hba.hdwq[idx];
	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&qp->abts_io_buf_list_lock);
	list_for_each_entry_safe(psb, next_psb,
				 &qp->lpfc_abts_io_buf_list, list) {
		if (psb->cur_iocbq.sli4_xritag == xri) {
			list_del_init(&psb->list);
			psb->flags &= ~LPFC_SBUF_XBUSY;
			psb->status = IOSTAT_SUCCESS;
			if (psb->cur_iocbq.iocb_flag & LPFC_IO_NVME) {
				qp->abts_nvme_io_bufs--;
				spin_unlock(&qp->abts_io_buf_list_lock);
				spin_unlock_irqrestore(&phba->hbalock, iflag);
				lpfc_sli4_nvme_xri_aborted(phba, axri, psb);
				return;
			}
			qp->abts_scsi_io_bufs--;
			spin_unlock(&qp->abts_io_buf_list_lock);

			if (psb->rdata && psb->rdata->pnode)
				ndlp = psb->rdata->pnode;
			else
				ndlp = NULL;

			rrq_empty = list_empty(&phba->active_rrq_list);
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			if (ndlp) {
				lpfc_set_rrq_active(phba, ndlp,
					psb->cur_iocbq.sli4_lxritag, rxid, 1);
				lpfc_sli4_abts_err_handler(phba, ndlp, axri);
			}
			lpfc_release_scsi_buf_s4(phba, psb);
			if (rrq_empty)
				lpfc_worker_wake_up(phba);
			return;
		}
	}
	spin_unlock(&qp->abts_io_buf_list_lock);
	for (i = 1; i <= phba->sli.last_iotag; i++) {
		iocbq = phba->sli.iocbq_lookup[i];

		if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
		    (iocbq->iocb_flag & LPFC_IO_LIBDFC))
			continue;
		if (iocbq->sli4_xritag != xri)
			continue;
		psb = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
		psb->flags &= ~LPFC_SBUF_XBUSY;
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		if (!list_empty(&pring->txq))
			lpfc_worker_wake_up(phba);
		return;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}

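/**
 * lpfc_get_scsi_buf_s3 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 * @ndlp: pointer to a node-list data structure.
 * @cmnd: scsi command to be issued.
 *
 * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list
 * list and returns it to the caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_io_buf - Success
 **/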
static struct lpfc_io_buf *
lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		     struct scsi_cmnd *cmnd)
{
	struct lpfc_io_buf *lpfc_cmd = NULL;
	struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get;
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag);
	list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_io_buf,
			 list);
	if (!lpfc_cmd) {
		spin_lock(&phba->scsi_buf_list_put_lock);
		list_splice(&phba->lpfc_scsi_buf_list_put,
			    &phba->lpfc_scsi_buf_list_get);
		INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
		list_remove_head(scsi_buf_list_get, lpfc_cmd,
				 struct lpfc_io_buf, list);
		spin_unlock(&phba->scsi_buf_list_put_lock);
	}
	spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);

	if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_cmd) {
		atomic_inc(&ndlp->cmd_pending);
		lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
	}
	return lpfc_cmd;
}

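/**
 * lpfc_get_scsi_buf_s4 - Get a scsi buffer from io_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 * @ndlp: pointer to a node-list data structure.
 * @cmnd: scsi command to be issued.
 *
 * This routine removes a scsi buffer from the io_buf_list of the selected
 * hardware queue and initializes the FCP CMND/RSP SGEs and the IOCB.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_io_buf - Success
 **/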
static struct lpfc_io_buf *
lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		     struct scsi_cmnd *cmnd)
{
	struct lpfc_io_buf *lpfc_cmd;
	struct lpfc_sli4_hdw_queue *qp;
	struct sli4_sge *sgl;
	IOCB_t *iocb;
	dma_addr_t pdma_phys_fcp_rsp;
	dma_addr_t pdma_phys_fcp_cmd;
	uint32_t cpu, idx;
	int tag;
	struct fcp_cmd_rsp_buf *tmp = NULL;

	cpu = raw_smp_processor_id();
	if (cmnd && phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) {
		tag = blk_mq_unique_tag(cmnd->request);
		idx = blk_mq_unique_tag_to_hwq(tag);
	} else {
		idx = phba->sli4_hba.cpu_map[cpu].hdwq;
	}

	lpfc_cmd = lpfc_get_io_buf(phba, ndlp, idx,
				   !phba->cfg_xri_rebalancing);
	if (!lpfc_cmd) {
		qp = &phba->sli4_hba.hdwq[idx];
		qp->empty_io_bufs++;
		return NULL;
	}

	/*
	 * Setup key fields in buffer that may have been changed
	 * if other protocols used this buffer.
	 */
	lpfc_cmd->cur_iocbq.iocb_flag = LPFC_IO_FCP;
	lpfc_cmd->prot_seg_cnt = 0;
	lpfc_cmd->seg_cnt = 0;
	lpfc_cmd->timeout = 0;
	lpfc_cmd->flags = 0;
	lpfc_cmd->start_time = jiffies;
	lpfc_cmd->waitq = NULL;
	lpfc_cmd->cpu = cpu;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	lpfc_cmd->prot_data_type = 0;
#endif
	tmp = lpfc_get_cmd_rsp_buf_per_hdwq(phba, lpfc_cmd);
	if (!tmp) {
		lpfc_release_io_buf(phba, lpfc_cmd, lpfc_cmd->hdwq);
		return NULL;
	}

	lpfc_cmd->fcp_cmnd = tmp->fcp_cmnd;
	lpfc_cmd->fcp_rsp = tmp->fcp_rsp;

	/*
	 * The first two SGEs are the FCP_CMD and FCP_RSP.
	 * The balance are sg list sges. Initialize the
	 * first two and leave the rest for queuecommand.
	 */
	sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
	pdma_phys_fcp_cmd = tmp->fcp_cmd_rsp_dma_handle;
	sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
	sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
	sgl->word2 = le32_to_cpu(sgl->word2);
	bf_set(lpfc_sli4_sge_last, sgl, 0);
	sgl->word2 = cpu_to_le32(sgl->word2);
	sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
	sgl++;

	/* Setup the physical region for the FCP RSP */
	pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);
	sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
	sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
	sgl->word2 = le32_to_cpu(sgl->word2);
	bf_set(lpfc_sli4_sge_last, sgl, 1);
	sgl->word2 = cpu_to_le32(sgl->word2);
	sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));

	/*
	 * Since the IOCB for the FCP I/O is built into this
	 * lpfc_io_buf, initialize it with all known data now.
	 */
	iocb = &lpfc_cmd->cur_iocbq.iocb;
	iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
	iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
	iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
	iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_fcp_cmd);
	iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_fcp_cmd);
	iocb->ulpBdeCount = 1;
	iocb->ulpLe = 1;
	iocb->ulpClass = CLASS3;

	if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_cmd) {
		atomic_inc(&ndlp->cmd_pending);
		lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
	}
	return lpfc_cmd;
}

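/**
 * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 * @ndlp: pointer to a node-list data structure.
 * @cmnd: scsi command to be issued.
 *
 * This routine dispatches to the SLI-rev specific scsi buffer allocator.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_io_buf - Success
 **/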
static struct lpfc_io_buf*
lpfc_get_scsi_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		  struct scsi_cmnd *cmnd)
{
	return phba->lpfc_get_scsi_buf(phba, ndlp, cmnd);
}

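/**
 * lpfc_release_scsi_buf_s3 - Return a scsi buffer back to hba scsi buf list
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to tail of @phba
 * lpfc_scsi_buf_list list.
 **/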
static void
lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
{
	unsigned long iflag = 0;

	psb->seg_cnt = 0;
	psb->prot_seg_cnt = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
	psb->pCmd = NULL;
	psb->cur_iocbq.iocb_flag = LPFC_IO_FCP;
	list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
	spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
}

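/**
 * lpfc_release_scsi_buf_s4 - Return a scsi buffer back to hba scsi buf list
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer to the io_buf_list of its hardware
 * queue. For SLI4, XRIs are tied to the buffer, so an exchange-busy buffer
 * is parked on the aborted list until the abort completes in hardware.
 **/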
static void
lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
{
	struct lpfc_sli4_hdw_queue *qp;
	unsigned long iflag = 0;

	psb->seg_cnt = 0;
	psb->prot_seg_cnt = 0;

	qp = psb->hdwq;
	if (psb->flags & LPFC_SBUF_XBUSY) {
		spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag);
		psb->pCmd = NULL;
		list_add_tail(&psb->list, &qp->lpfc_abts_io_buf_list);
		qp->abts_scsi_io_bufs++;
		spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag);
	} else {
		lpfc_release_io_buf(phba, (struct lpfc_io_buf *)psb, qp);
	}
}

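/**
 * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine undoes any queue-depth accounting for @psb and then invokes
 * the SLI-rev specific release handler.
 **/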
static void
lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
{
	if ((psb->flags & LPFC_SBUF_BUMP_QDEPTH) && psb->ndlp)
		atomic_dec(&psb->ndlp->cmd_pending);

	psb->flags &= ~LPFC_SBUF_BUMP_QDEPTH;
	phba->lpfc_release_scsi_buf(phba, psb);
}

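/**
 * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This routine does the pci dma mapping for the scatter-gather list of the
 * scsi cmnd field of @lpfc_cmd for a device with SLI-3 interface spec. It
 * scans through the sg elements, formats the bdes, and initializes the IOCB
 * fields which depend on the scsi command request buffer.
 *
 * Return codes:
 *   1 - Error (DMA map failed, may be retried)
 *   2 - Error (sg count exceeded the supported limit)
 *   0 - Success
 **/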
static int
lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct scatterlist *sgel = NULL;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct ulp_bde64 *bpl = (struct ulp_bde64 *)lpfc_cmd->dma_sgl;
	struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
	dma_addr_t physaddr;
	uint32_t num_bde = 0;
	int nseg, datadir = scsi_cmnd->sc_data_direction;

	/*
	 * Start the lpfc command prep by bumping the bpl beyond the fcp_cmnd
	 * and fcp_rsp regions to the first data bde entry.
	 */
	bpl += 2;
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from dma_map_sg
		 * because this is a count of dma-mappings used to map the
		 * use_sg pages. They are not guaranteed to be the same for
		 * those architectures that implement an IOMMU.
		 */
		nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
				  scsi_sg_count(scsi_cmnd), datadir);
		if (unlikely(!nseg))
			return 1;

		lpfc_cmd->seg_cnt = nseg;
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9064 BLKGRD: %s: Too many sg segments from "
				"dma_map_sg. Config %d, seg_cnt %d\n",
				__func__, phba->cfg_sg_seg_cnt,
				lpfc_cmd->seg_cnt);
			WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
			lpfc_cmd->seg_cnt = 0;
			scsi_dma_unmap(scsi_cmnd);
			return 2;
		}

		/*
		 * Run through the seg_cnt and format the bde's. When using
		 * SLI-3 the driver will try to fit all the BDEs into the
		 * IOCB; if it can't, the BDEs get added to a BPL as is done
		 * for SLI-2.
		 */
		scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
			physaddr = sg_dma_address(sgel);
			if (phba->sli_rev == 3 &&
			    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
			    !(iocbq->iocb_flag & DSS_SECURITY_OP) &&
			    nseg <= LPFC_EXT_DATA_BDE_COUNT) {
				data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				data_bde->tus.f.bdeSize = sg_dma_len(sgel);
				data_bde->addrLow = putPaddrLow(physaddr);
				data_bde->addrHigh = putPaddrHigh(physaddr);
				data_bde++;
			} else {
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				bpl->tus.f.bdeSize = sg_dma_len(sgel);
				bpl->tus.w = le32_to_cpu(bpl->tus.w);
				bpl->addrLow =
					le32_to_cpu(putPaddrLow(physaddr));
				bpl->addrHigh =
					le32_to_cpu(putPaddrHigh(physaddr));
				bpl++;
			}
		}
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer. The bdeSize and extended bde count are
	 * explicitly reinitialized since all iocb memory resources are
	 * reused.
	 */
	if (phba->sli_rev == 3 &&
	    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
	    !(iocbq->iocb_flag & DSS_SECURITY_OP)) {
		if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
			/*
			 * The extended IOCB format can only fit 3 BDEs or a
			 * BPL. This I/O has more than 3 BDEs so the first
			 * data bde is a BPL that is filled in here.
			 */
			physaddr = lpfc_cmd->dma_handle;
			data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
			data_bde->tus.f.bdeSize = (num_bde *
						   sizeof(struct ulp_bde64));
			physaddr += (sizeof(struct fcp_cmnd) +
				     sizeof(struct fcp_rsp) +
				     (2 * sizeof(struct ulp_bde64)));
			data_bde->addrHigh = putPaddrHigh(physaddr);
			data_bde->addrLow = putPaddrLow(physaddr);
			/* ebde count includes the response bde and data bpl */
			iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
		} else {
			/* ebde count includes the response bde and data bdes */
			iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
		}
	} else {
		iocb_cmd->un.fcpi64.bdl.bdeSize =
			((num_bde + 2) * sizeof(struct ulp_bde64));
		iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
	}
	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of IOCB here
	 */
	iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
	return 0;
}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS

/* Return BG_ERR_INIT if error injection is detected by Initiator */
#define BG_ERR_INIT	0x1
/* Return BG_ERR_TGT if error injection is detected by Target */
#define BG_ERR_TGT	0x2
/* Return BG_ERR_SWAP if swapping CSUM<-->CRC is required for error injection */
#define BG_ERR_SWAP	0x10
/*
 * Return BG_ERR_CHECK if disabling Guard/Ref/App checking is required for
 * error injection
 */
#define BG_ERR_CHECK	0x20

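/**
 * lpfc_bg_err_inject - Determine if we should inject an error
 * @phba: The Hba for which this call is being executed.
 * @sc: The SCSI command to examine
 * @reftag: (out) BlockGuard reference tag for transmitted data
 * @apptag: (out) BlockGuard application tag for transmitted data
 * @new_guard: (in) Value to replace CRC with if needed
 *
 * Returns BG_ERR_* bit mask or 0 if request ignored
 **/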
static int
lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		   uint32_t *reftag, uint16_t *apptag, uint32_t new_guard)
{
	struct scatterlist *sgpe;	/* s/g prot entry */
	struct lpfc_io_buf *lpfc_cmd = NULL;
	struct scsi_dif_tuple *src = NULL;
	struct lpfc_nodelist *ndlp;
	struct lpfc_rport_data *rdata;
	uint32_t op = scsi_get_prot_op(sc);
	uint32_t blksize;
	uint32_t numblks;
	sector_t lba;
	int rc = 0;
	int blockoff = 0;

	if (op == SCSI_PROT_NORMAL)
		return 0;

	sgpe = scsi_prot_sglist(sc);
	lba = scsi_get_lba(sc);

	/* First check if we need to match the LBA */
	if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) {
		blksize = lpfc_cmd_blksize(sc);
		numblks = (scsi_bufflen(sc) + blksize - 1) / blksize;

		/* Make sure we have the right LBA if one is specified */
		if ((phba->lpfc_injerr_lba < lba) ||
		    (phba->lpfc_injerr_lba >= (lba + numblks)))
			return 0;
		if (sgpe) {
			blockoff = phba->lpfc_injerr_lba - lba;
			numblks = sg_dma_len(sgpe) /
				sizeof(struct scsi_dif_tuple);
			if (numblks < blockoff)
				blockoff = numblks;
		}
	}

	/* Next check if we need to match the remote NPortID or WWPN */
	rdata = lpfc_rport_data_from_scsi_device(sc->device);
	if (rdata && rdata->pnode) {
		ndlp = rdata->pnode;

		/* Make sure we have the right NPortID if one is specified */
		if (phba->lpfc_injerr_nportid &&
		    (phba->lpfc_injerr_nportid != ndlp->nlp_DID))
			return 0;

		/*
		 * Make sure we have the right WWPN if one is specified.
		 * wwn[0] should be a non-zero NAA in a good WWPN.
		 */
		if (phba->lpfc_injerr_wwpn.u.wwn[0] &&
		    (memcmp(&ndlp->nlp_portname, &phba->lpfc_injerr_wwpn,
			    sizeof(struct lpfc_name)) != 0))
			return 0;
	}

	/* Setup a ptr to the protection data if the SCSI host provides it */
	if (sgpe) {
		src = (struct scsi_dif_tuple *)sg_virt(sgpe);
		src += blockoff;
		lpfc_cmd = (struct lpfc_io_buf *)sc->host_scribble;
	}

	/* Should we change the Reference Tag */
	if (reftag) {
		if (phba->lpfc_injerr_wref_cnt) {
			switch (op) {
			case SCSI_PROT_WRITE_PASS:
				if (src) {
					/*
					 * For WRITE_PASS, force the error
					 * to be sent on the wire. It should
					 * be detected by the Target.
					 * If blockoff != 0 error will be
					 * inserted in middle of the IO.
					 */
					lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9076 BLKGRD: Injecting reftag error: "
					"write lba x%lx + x%x oldrefTag x%x\n",
					(unsigned long)lba, blockoff,
					be32_to_cpu(src->ref_tag));

					/*
					 * Save the old ref_tag so we can
					 * restore it on completion.
					 */
					if (lpfc_cmd) {
						lpfc_cmd->prot_data_type =
							LPFC_INJERR_REFTAG;
						lpfc_cmd->prot_data_segment =
							src;
						lpfc_cmd->prot_data =
							src->ref_tag;
					}
					src->ref_tag = cpu_to_be32(0xDEADBEEF);
					phba->lpfc_injerr_wref_cnt--;
					if (phba->lpfc_injerr_wref_cnt == 0) {
						phba->lpfc_injerr_nportid = 0;
						phba->lpfc_injerr_lba =
							LPFC_INJERR_LBA_OFF;
						memset(&phba->lpfc_injerr_wwpn,
						  0, sizeof(struct lpfc_name));
					}
					rc = BG_ERR_TGT | BG_ERR_CHECK;

					break;
				}
				/* fall through */
			case SCSI_PROT_WRITE_INSERT:
				/*
				 * For WRITE_INSERT, force the error
				 * to be sent on the wire. It should be
				 * detected by the Target.
				 */
				/* DEADBEEF will be the reftag on the wire */
				*reftag = 0xDEADBEEF;
				phba->lpfc_injerr_wref_cnt--;
				if (phba->lpfc_injerr_wref_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
					       0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_TGT | BG_ERR_CHECK;

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9078 BLKGRD: Injecting reftag error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			case SCSI_PROT_WRITE_STRIP:
				/*
				 * For WRITE_STRIP and WRITE_PASS,
				 * force the error on data
				 * being copied from SLI-Host to SLI-Port.
				 */
				*reftag = 0xDEADBEEF;
				phba->lpfc_injerr_wref_cnt--;
				if (phba->lpfc_injerr_wref_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
					       0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_INIT;

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9077 BLKGRD: Injecting reftag error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
		if (phba->lpfc_injerr_rref_cnt) {
			switch (op) {
			case SCSI_PROT_READ_INSERT:
			case SCSI_PROT_READ_STRIP:
			case SCSI_PROT_READ_PASS:
				/*
				 * For READ_STRIP and READ_PASS, force the
				 * error on data being read off the wire. It
				 * should force an IO error to the driver.
				 */
				*reftag = 0xDEADBEEF;
				phba->lpfc_injerr_rref_cnt--;
				if (phba->lpfc_injerr_rref_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
					       0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_INIT;

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9079 BLKGRD: Injecting reftag error: "
					"read lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
	}

	/* Should we change the Application Tag */
	if (apptag) {
		if (phba->lpfc_injerr_wapp_cnt) {
			switch (op) {
			case SCSI_PROT_WRITE_PASS:
				if (src) {
					/*
					 * For WRITE_PASS, force the error
					 * to be sent on the wire. It should
					 * be detected by the Target.
					 * If blockoff != 0 error will be
					 * inserted in middle of the IO.
					 */
					lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9080 BLKGRD: Injecting apptag error: "
					"write lba x%lx + x%x oldappTag x%x\n",
					(unsigned long)lba, blockoff,
					be16_to_cpu(src->app_tag));

					/*
					 * Save the old app_tag so we can
					 * restore it on completion.
					 */
					if (lpfc_cmd) {
						lpfc_cmd->prot_data_type =
							LPFC_INJERR_APPTAG;
						lpfc_cmd->prot_data_segment =
							src;
						lpfc_cmd->prot_data =
							src->app_tag;
					}
					src->app_tag = cpu_to_be16(0xDEAD);
					phba->lpfc_injerr_wapp_cnt--;
					if (phba->lpfc_injerr_wapp_cnt == 0) {
						phba->lpfc_injerr_nportid = 0;
						phba->lpfc_injerr_lba =
							LPFC_INJERR_LBA_OFF;
						memset(&phba->lpfc_injerr_wwpn,
						  0, sizeof(struct lpfc_name));
					}
					rc = BG_ERR_TGT | BG_ERR_CHECK;
					break;
				}
				/* fall through */
			case SCSI_PROT_WRITE_INSERT:
				/*
				 * For WRITE_INSERT, force the
				 * error to be sent on the wire. It should be
				 * detected by the Target.
				 */
				/* DEAD will be the apptag on the wire */
				*apptag = 0xDEAD;
				phba->lpfc_injerr_wapp_cnt--;
				if (phba->lpfc_injerr_wapp_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
					       0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_TGT | BG_ERR_CHECK;

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"0813 BLKGRD: Injecting apptag error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			case SCSI_PROT_WRITE_STRIP:
				/*
				 * For WRITE_STRIP and WRITE_PASS,
				 * force the error on data
				 * being copied from SLI-Host to SLI-Port.
				 */
				*apptag = 0xDEAD;
				phba->lpfc_injerr_wapp_cnt--;
				if (phba->lpfc_injerr_wapp_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
					       0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_INIT;

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"0812 BLKGRD: Injecting apptag error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
		if (phba->lpfc_injerr_rapp_cnt) {
			switch (op) {
			case SCSI_PROT_READ_INSERT:
			case SCSI_PROT_READ_STRIP:
			case SCSI_PROT_READ_PASS:
				/*
				 * For READ_STRIP and READ_PASS, force the
				 * error on data being read off the wire. It
				 * should force an IO error to the driver.
				 */
				*apptag = 0xDEAD;
				phba->lpfc_injerr_rapp_cnt--;
				if (phba->lpfc_injerr_rapp_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
					       0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_INIT;

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"0814 BLKGRD: Injecting apptag error: "
					"read lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
	}

	/* Should we change the Guard Tag */
	if (new_guard) {
		if (phba->lpfc_injerr_wgrd_cnt) {
			switch (op) {
			case SCSI_PROT_WRITE_PASS:
				rc = BG_ERR_CHECK;
				/* fall through */

			case SCSI_PROT_WRITE_INSERT:
				/*
				 * For WRITE_INSERT, force the
				 * error to be sent on the wire. It should be
				 * detected by the Target.
				 */
				phba->lpfc_injerr_wgrd_cnt--;
				if (phba->lpfc_injerr_wgrd_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
					       0, sizeof(struct lpfc_name));
				}

				rc |= BG_ERR_TGT | BG_ERR_SWAP;
				/* Signals the caller to swap CRC->CSUM */

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"0817 BLKGRD: Injecting guard error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			case SCSI_PROT_WRITE_STRIP:
				/*
				 * For WRITE_STRIP and WRITE_PASS,
				 * force the error on data
				 * being copied from SLI-Host to SLI-Port.
				 */
				phba->lpfc_injerr_wgrd_cnt--;
				if (phba->lpfc_injerr_wgrd_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
					       0, sizeof(struct lpfc_name));
				}

				rc = BG_ERR_INIT | BG_ERR_SWAP;
				/* Signals the caller to swap CRC->CSUM */

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"0816 BLKGRD: Injecting guard error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
		if (phba->lpfc_injerr_rgrd_cnt) {
			switch (op) {
			case SCSI_PROT_READ_INSERT:
			case SCSI_PROT_READ_STRIP:
			case SCSI_PROT_READ_PASS:
				/*
				 * For READ_STRIP and READ_PASS, force the
				 * error on data being read off the wire. It
				 * should force an IO error to the driver.
				 */
				phba->lpfc_injerr_rgrd_cnt--;
				if (phba->lpfc_injerr_rgrd_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
					       0, sizeof(struct lpfc_name));
				}

				rc = BG_ERR_INIT | BG_ERR_SWAP;
				/* Signals the caller to swap CRC->CSUM */

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"0818 BLKGRD: Injecting guard error: "
					"read lba x%lx\n", (unsigned long)lba);
			}
		}
	}

	return rc;
}
#endif

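/**
 * lpfc_sc_to_bg_opcodes - Determine the BlockGuard opcodes to be used with
 * the specified SCSI command.
 * @phba: The Hba for which this call is being executed.
 * @sc: The SCSI command to examine
 * @txop: (out) BlockGuard operation for transmitted data
 * @rxop: (out) BlockGuard operation for received data
 *
 * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
 **/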
static int
lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		      uint8_t *txop, uint8_t *rxop)
{
	uint8_t ret = 0;

	if (lpfc_cmd_guard_csum(sc)) {
		switch (scsi_get_prot_op(sc)) {
		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			*rxop = BG_OP_IN_NODIF_OUT_CSUM;
			*txop = BG_OP_IN_CSUM_OUT_NODIF;
			break;

		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
			*rxop = BG_OP_IN_CRC_OUT_NODIF;
			*txop = BG_OP_IN_NODIF_OUT_CRC;
			break;

		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			*rxop = BG_OP_IN_CRC_OUT_CSUM;
			*txop = BG_OP_IN_CSUM_OUT_CRC;
			break;

		case SCSI_PROT_NORMAL:
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9063 BLKGRD: Bad op/guard:%d/IP combination\n",
					scsi_get_prot_op(sc));
			ret = 1;
			break;

		}
	} else {
		switch (scsi_get_prot_op(sc)) {
		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
			*rxop = BG_OP_IN_CRC_OUT_NODIF;
			*txop = BG_OP_IN_NODIF_OUT_CRC;
			break;

		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			*rxop = BG_OP_IN_CRC_OUT_CRC;
			*txop = BG_OP_IN_CRC_OUT_CRC;
			break;

		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			*rxop = BG_OP_IN_NODIF_OUT_CRC;
			*txop = BG_OP_IN_CRC_OUT_NODIF;
			break;

		case SCSI_PROT_NORMAL:
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9075 BLKGRD: Bad op/guard:%d/CRC combination\n",
					scsi_get_prot_op(sc));
			ret = 1;
			break;
		}
	}

	return ret;
}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS

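/**
 * lpfc_bg_err_opcodes - Determine the BlockGuard opcodes to be used with
 * the specified SCSI command in order to force a guard tag error.
 * @phba: The Hba for which this call is being executed.
 * @sc: The SCSI command to examine
 * @txop: (out) BlockGuard operation for transmitted data
 * @rxop: (out) BlockGuard operation for received data
 *
 * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
 **/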
static int
lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		    uint8_t *txop, uint8_t *rxop)
{
	uint8_t ret = 0;

	if (lpfc_cmd_guard_csum(sc)) {
		switch (scsi_get_prot_op(sc)) {
		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			*rxop = BG_OP_IN_NODIF_OUT_CRC;
			*txop = BG_OP_IN_CRC_OUT_NODIF;
			break;

		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
			*rxop = BG_OP_IN_CSUM_OUT_NODIF;
			*txop = BG_OP_IN_NODIF_OUT_CSUM;
			break;

		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			*rxop = BG_OP_IN_CSUM_OUT_CRC;
			*txop = BG_OP_IN_CRC_OUT_CSUM;
			break;

		case SCSI_PROT_NORMAL:
		default:
			break;

		}
	} else {
		switch (scsi_get_prot_op(sc)) {
		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
			*rxop = BG_OP_IN_CSUM_OUT_NODIF;
			*txop = BG_OP_IN_NODIF_OUT_CSUM;
			break;

		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			*rxop = BG_OP_IN_CSUM_OUT_CSUM;
			*txop = BG_OP_IN_CSUM_OUT_CSUM;
			break;

		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			*rxop = BG_OP_IN_NODIF_OUT_CSUM;
			*txop = BG_OP_IN_CSUM_OUT_NODIF;
			break;

		case SCSI_PROT_NORMAL:
		default:
			break;
		}
	}

	return ret;
}
#endif

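/**
 * lpfc_bg_setup_bpl - Setup BlockGuard BPL with no protection data
 * @phba: The Hba for which this call is being executed.
 * @sc: pointer to scsi command we're working on
 * @bpl: pointer to buffer list for protection groups
 * @datasegcnt: number of segments of data that have been dma mapped
 *
 * This function sets up the BPL buffer list for protection groups of
 * type LPFC_PG_TYPE_NO_DIF.
 *
 * The buffer list consists of just one protection group:
 *                                +-------------------------+
 *   start of prot group  -->     |          PDE_5          |
 *                                +-------------------------+
 *                                |          PDE_6          |
 *                                +-------------------------+
 *                                |         Data BDE        |
 *                                +-------------------------+
 *                                |more Data BDE's ... (opt)|
 *                                +-------------------------+
 *
 * Note: Data s/g buffers have been dma mapped
 *
 * Returns the number of BDEs added to the BPL.
 **/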
static int
lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		  struct ulp_bde64 *bpl, int datasegcnt)
{
	struct scatterlist *sgde = NULL;	/* s/g data entry */
	struct lpfc_pde5 *pde5 = NULL;
	struct lpfc_pde6 *pde6 = NULL;
	dma_addr_t physaddr;
	int i = 0, num_bde = 0, status;
	int datadir = sc->sc_data_direction;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t rc;
#endif
	uint32_t checking = 1;
	uint32_t reftag;
	uint8_t txop, rxop;

	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract some info from the scsi command for pde */
	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
		if (rc & BG_ERR_CHECK)
			checking = 0;
	}
#endif

	/* setup PDE5 with what we have */
	pde5 = (struct lpfc_pde5 *) bpl;
	memset(pde5, 0, sizeof(struct lpfc_pde5));
	bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);

	/* Endianness conversion if necessary for PDE5 */
	pde5->word0 = cpu_to_le32(pde5->word0);
	pde5->reftag = cpu_to_le32(reftag);

	/* advance bpl and increment bde count */
	num_bde++;
	bpl++;
	pde6 = (struct lpfc_pde6 *) bpl;

	/* setup PDE6 with the rest of the info */
	memset(pde6, 0, sizeof(struct lpfc_pde6));
	bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
	bf_set(pde6_optx, pde6, txop);
	bf_set(pde6_oprx, pde6, rxop);

	/*
	 * We only need to check the data on READs, for WRITEs
	 * protection data is automatically generated, not checked.
	 */
	if (datadir == DMA_FROM_DEVICE) {
		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
			bf_set(pde6_ce, pde6, checking);
		else
			bf_set(pde6_ce, pde6, 0);

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
			bf_set(pde6_re, pde6, checking);
		else
			bf_set(pde6_re, pde6, 0);
	}
	bf_set(pde6_ai, pde6, 1);
	bf_set(pde6_ae, pde6, 0);
	bf_set(pde6_apptagval, pde6, 0);

	/* Endianness conversion if necessary for PDE6 */
	pde6->word0 = cpu_to_le32(pde6->word0);
	pde6->word1 = cpu_to_le32(pde6->word1);
	pde6->word2 = cpu_to_le32(pde6->word2);

	/* advance bpl and increment bde count */
	num_bde++;
	bpl++;

	/* assemble scatter list into list of bdes */
	scsi_for_each_sg(sc, sgde, datasegcnt, i) {
		physaddr = sg_dma_address(sgde);
		bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
		bpl->tus.f.bdeSize = sg_dma_len(sgde);
		if (datadir == DMA_TO_DEVICE)
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		else
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
		bpl++;
		num_bde++;
	}

out:
	return num_bde;
}

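/**
 * lpfc_bg_setup_bpl_prot - Setup BlockGuard BPL with protection data
 * @phba: The Hba for which this call is being executed.
 * @sc: pointer to scsi command we're working on
 * @bpl: pointer to buffer list for protection groups
 * @datacnt: number of segments of data that have been dma mapped
 * @protcnt: number of segment of protection data that have been dma mapped
 *
 * This function sets up the BPL buffer list for protection groups of
 * type LPFC_PG_TYPE_DIF. This is used when DIFs are in buffers separate
 * from the data, so the HBA can be instructed to place them in the
 * outgoing stream, or to extract them on reads.
 *
 * The buffer list consists of one or more protection groups:
 *                                    +-------------------------+
 *   start of first prot group  -->   |          PDE_5          |
 *                                    +-------------------------+
 *                                    |          PDE_6          |
 *                                    +-------------------------+
 *                                    |      PDE_7 (Prot BDE)   |
 *                                    +-------------------------+
 *                                    |        Data BDE         |
 *                                    +-------------------------+
 *                                    |more Data BDE's ... (opt)|
 *                                    +-------------------------+
 *   start of new prot group  -->     |          PDE_5          |
 *                                    +-------------------------+
 *                                    |          ...            |
 *                                    +-------------------------+
 *
 * Note: It is assumed that both data and protection s/g buffers have been
 *       mapped for DMA
 *
 * Returns the number of BDEs added to the BPL.
 **/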
static int
lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		       struct ulp_bde64 *bpl, int datacnt, int protcnt)
{
	struct scatterlist *sgde = NULL;	/* s/g data entry */
	struct scatterlist *sgpe = NULL;	/* s/g prot entry */
	struct lpfc_pde5 *pde5 = NULL;
	struct lpfc_pde6 *pde6 = NULL;
	struct lpfc_pde7 *pde7 = NULL;
	dma_addr_t dataphysaddr, protphysaddr;
	unsigned short curr_data = 0, curr_prot = 0;
	unsigned int split_offset;
	unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
	unsigned int protgrp_blks, protgrp_bytes;
	unsigned int remainder, subtotal;
	int status;
	int datadir = sc->sc_data_direction;
	unsigned char pgdone = 0, alldone = 0;
	unsigned blksize;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t rc;
#endif
	uint32_t checking = 1;
	uint32_t reftag;
	uint8_t txop, rxop;
	int num_bde = 0;

	sgpe = scsi_prot_sglist(sc);
	sgde = scsi_sglist(sc);

	if (!sgpe || !sgde) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
				"9020 Invalid s/g entry: data=x%px prot=x%px\n",
				sgpe, sgde);
		return 0;
	}

	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract some info from the scsi command */
	blksize = lpfc_cmd_blksize(sc);
	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
		if (rc & BG_ERR_CHECK)
			checking = 0;
	}
#endif

	split_offset = 0;
	do {
		/* Check to see if we ran out of space */
		if (num_bde >= (phba->cfg_total_seg_cnt - 2))
			return num_bde + 3;

		/* setup PDE5 with what we have */
		pde5 = (struct lpfc_pde5 *) bpl;
		memset(pde5, 0, sizeof(struct lpfc_pde5));
		bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);

		/* Endianness conversion if necessary for PDE5 */
		pde5->word0 = cpu_to_le32(pde5->word0);
		pde5->reftag = cpu_to_le32(reftag);

		/* advance bpl and increment bde count */
		num_bde++;
		bpl++;
		pde6 = (struct lpfc_pde6 *) bpl;

		/* setup PDE6 with the rest of the info */
		memset(pde6, 0, sizeof(struct lpfc_pde6));
		bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
		bf_set(pde6_optx, pde6, txop);
		bf_set(pde6_oprx, pde6, rxop);

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
			bf_set(pde6_ce, pde6, checking);
		else
			bf_set(pde6_ce, pde6, 0);

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
			bf_set(pde6_re, pde6, checking);
		else
			bf_set(pde6_re, pde6, 0);

		bf_set(pde6_ai, pde6, 1);
		bf_set(pde6_ae, pde6, 0);
		bf_set(pde6_apptagval, pde6, 0);

		/* Endianness conversion if necessary for PDE6 */
		pde6->word0 = cpu_to_le32(pde6->word0);
		pde6->word1 = cpu_to_le32(pde6->word1);
		pde6->word2 = cpu_to_le32(pde6->word2);

		/* advance bpl and increment bde count */
		num_bde++;
		bpl++;

		/* setup the first BDE that points to protection buffer */
		protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
		protgroup_len = sg_dma_len(sgpe) - protgroup_offset;

		/* must be integer multiple of the DIF block length */
		BUG_ON(protgroup_len % 8);

		pde7 = (struct lpfc_pde7 *) bpl;
		memset(pde7, 0, sizeof(struct lpfc_pde7));
		bf_set(pde7_type, pde7, LPFC_PDE7_DESCRIPTOR);

		pde7->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr));
		pde7->addrLow = le32_to_cpu(putPaddrLow(protphysaddr));

		protgrp_blks = protgroup_len / 8;
		protgrp_bytes = protgrp_blks * blksize;

		/* check if this pde is crossing the 4K boundary; if so split */
		if ((pde7->addrLow & 0xfff) + protgroup_len > 0x1000) {
			protgroup_remainder = 0x1000 - (pde7->addrLow & 0xfff);
			protgroup_offset += protgroup_remainder;
			protgrp_blks = protgroup_remainder / 8;
			protgrp_bytes = protgrp_blks * blksize;
		} else {
			protgroup_offset = 0;
			curr_prot++;
		}

		num_bde++;

		/* setup BDE's for data blocks associated with DIF data */
		pgdone = 0;
		subtotal = 0; /* total bytes processed for current prot grp */
		while (!pgdone) {
			/* Check to see if we ran out of space */
			if (num_bde >= phba->cfg_total_seg_cnt)
				return num_bde + 1;

			if (!sgde) {
				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9065 BLKGRD:%s Invalid data segment\n",
						__func__);
				return 0;
			}
			bpl++;
			dataphysaddr = sg_dma_address(sgde) + split_offset;
			bpl->addrLow = le32_to_cpu(putPaddrLow(dataphysaddr));
			bpl->addrHigh = le32_to_cpu(putPaddrHigh(dataphysaddr));

			remainder = sg_dma_len(sgde) - split_offset;

			if ((subtotal + remainder) <= protgrp_bytes) {
				/* we can use this whole buffer */
				bpl->tus.f.bdeSize = remainder;
				split_offset = 0;

				if ((subtotal + remainder) == protgrp_bytes)
					pgdone = 1;
			} else {
				/* must split this buffer with next prot grp */
				bpl->tus.f.bdeSize = protgrp_bytes - subtotal;
				split_offset += bpl->tus.f.bdeSize;
			}

			subtotal += bpl->tus.f.bdeSize;

			if (datadir == DMA_TO_DEVICE)
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
			else
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
			bpl->tus.w = le32_to_cpu(bpl->tus.w);

			num_bde++;
			curr_data++;

			if (split_offset)
				break;

			/* Move to the next s/g segment if possible */
			sgde = sg_next(sgde);

		}

		if (protgroup_offset) {
			/* update the reference tag */
			reftag += protgrp_blks;
			bpl++;
			continue;
		}

		/* are we done ? */
		if (curr_prot == protcnt) {
			alldone = 1;
		} else if (curr_prot < protcnt) {
			/* advance to next prot buffer */
			sgpe = sg_next(sgpe);
			bpl++;

			/* update the reference tag */
			reftag += protgrp_blks;
		} else {
			/* if we're here, we have a bug */
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9054 BLKGRD: bug in %s\n", __func__);
		}

	} while (!alldone);
out:

	return num_bde;
}

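/**
 * lpfc_bg_setup_sgl - Setup BlockGuard SGL with no protection data
 * @phba: The Hba for which this call is being executed.
 * @sc: pointer to scsi command we're working on
 * @sgl: pointer to buffer list for protection groups
 * @datasegcnt: number of segments of data that have been dma mapped
 * @lpfc_cmd: lpfc scsi command object pointer.
 *
 * This function sets up the SGL buffer list for protection groups of
 * type LPFC_PG_TYPE_NO_DIF.
 *
 * The buffer list consists of just one protection group:
 *                                +-------------------------+
 *   start of prot group  -->     |         DISEED          |
 *                                +-------------------------+
 *                                |         Data SGE        |
 *                                +-------------------------+
 *                                |more Data SGE's ... (opt)|
 *                                +-------------------------+
 *
 * Note: Data s/g buffers have been dma mapped
 *
 * Returns the number of SGEs added to the SGL.
 **/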
static int
lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		  struct sli4_sge *sgl, int datasegcnt,
		  struct lpfc_io_buf *lpfc_cmd)
{
	struct scatterlist *sgde = NULL;	/* s/g data entry */
	struct sli4_sge_diseed *diseed = NULL;
	dma_addr_t physaddr;
	int i = 0, num_sge = 0, status;
	uint32_t reftag;
	uint8_t txop, rxop;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t rc;
#endif
	uint32_t checking = 1;
	uint32_t dma_len;
	uint32_t dma_offset = 0;
	struct sli4_hybrid_sgl *sgl_xtra = NULL;
	int j;
	bool lsp_just_set = false;

	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract some info from the scsi command for pde */
	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
		if (rc & BG_ERR_CHECK)
			checking = 0;
	}
#endif

	/* setup DISEED with what we have */
	diseed = (struct sli4_sge_diseed *) sgl;
	memset(diseed, 0, sizeof(struct sli4_sge_diseed));
	bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);

	/* Endianness conversion if necessary */
	diseed->ref_tag = cpu_to_le32(reftag);
	diseed->ref_tag_tran = diseed->ref_tag;

	/*
	 * We only need to check the data on READs, for WRITEs
	 * protection data is automatically generated, not checked.
	 */
	if (sc->sc_data_direction == DMA_FROM_DEVICE) {
		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
			bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
		else
			bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
			bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
		else
			bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
	}

	/* setup DISEED with the rest of the info */
	bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
	bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);

	bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
	bf_set(lpfc_sli4_sge_dif_me, diseed, 0);

	/* Endianness conversion if necessary for DISEED */
	diseed->word2 = cpu_to_le32(diseed->word2);
	diseed->word3 = cpu_to_le32(diseed->word3);

	/* advance sgl and increment sge count */
	num_sge++;
	sgl++;

	/* assemble scatter list into SGEs */
	sgde = scsi_sglist(sc);
	j = 3;
	for (i = 0; i < datasegcnt; i++) {
		/* clear it */
		sgl->word2 = 0;

		/* do we need to expand the segment */
		if (!lsp_just_set && !((j + 1) % phba->border_sge_num) &&
		    ((datasegcnt - 1) != i)) {
			/* set LSP type */
			bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP);

			sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd);

			if (unlikely(!sgl_xtra)) {
				lpfc_cmd->seg_cnt = 0;
				return 0;
			}
			sgl->addr_lo = cpu_to_le32(putPaddrLow(
						sgl_xtra->dma_phys_sgl));
			sgl->addr_hi = cpu_to_le32(putPaddrHigh(
						sgl_xtra->dma_phys_sgl));

		} else {
			bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
		}

		if (!(bf_get(lpfc_sli4_sge_type, sgl) & LPFC_SGE_TYPE_LSP)) {
			if ((datasegcnt - 1) == i)
				bf_set(lpfc_sli4_sge_last, sgl, 1);
			physaddr = sg_dma_address(sgde);
			dma_len = sg_dma_len(sgde);
			sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
			sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));

			bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
			sgl->word2 = cpu_to_le32(sgl->word2);
			sgl->sge_len = cpu_to_le32(dma_len);

			dma_offset += dma_len;
			sgde = sg_next(sgde);

			sgl++;
			num_sge++;
			lsp_just_set = false;

		} else {
			sgl->word2 = cpu_to_le32(sgl->word2);
			sgl->sge_len = cpu_to_le32(phba->cfg_sg_dma_buf_size);

			sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
			i = i - 1;

			lsp_just_set = true;
		}

		j++;

	}

out:
	return num_sge;
}

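/**
 * lpfc_bg_setup_sgl_prot - Setup BlockGuard SGL with protection data
 * @phba: The Hba for which this call is being executed.
 * @sc: pointer to scsi command we're working on
 * @sgl: pointer to buffer list for protection groups
 * @datacnt: number of segments of data that have been dma mapped
 * @protcnt: number of segment of protection data that have been dma mapped
 * @lpfc_cmd: lpfc scsi command object pointer.
 *
 * This function sets up the SGL buffer list for protection groups of
 * type LPFC_PG_TYPE_DIF.
 *
 * The buffer list consists of one or more protection groups:
 *                                    +-------------------------+
 *   start of first prot group  -->   |         DISEED          |
 *                                    +-------------------------+
 *                                    |      DIF (Prot SGE)     |
 *                                    +-------------------------+
 *                                    |        Data SGE         |
 *                                    +-------------------------+
 *                                    |more Data SGE's ... (opt)|
 *                                    +-------------------------+
 *   start of new prot group  -->     |         DISEED          |
 *                                    +-------------------------+
 *                                    |          ...            |
 *                                    +-------------------------+
 *
 * Note: It is assumed that both data and protection s/g buffers have been
 *       mapped for DMA
 *
 * Returns the number of SGEs added to the SGL.
 **/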
static int
lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		       struct sli4_sge *sgl, int datacnt, int protcnt,
		       struct lpfc_io_buf *lpfc_cmd)
{
	struct scatterlist *sgde = NULL;	/* s/g data entry */
	struct scatterlist *sgpe = NULL;	/* s/g prot entry */
	struct sli4_sge_diseed *diseed = NULL;
	dma_addr_t dataphysaddr, protphysaddr;
	unsigned short curr_data = 0, curr_prot = 0;
	unsigned int split_offset;
	unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
	unsigned int protgrp_blks, protgrp_bytes;
	unsigned int remainder, subtotal;
	int status;
	unsigned char pgdone = 0, alldone = 0;
	unsigned blksize;
	uint32_t reftag;
	uint8_t txop, rxop;
	uint32_t dma_len;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t rc;
#endif
	uint32_t checking = 1;
	uint32_t dma_offset = 0;
	int num_sge = 0, j = 2;
	struct sli4_hybrid_sgl *sgl_xtra = NULL;

	sgpe = scsi_prot_sglist(sc);
	sgde = scsi_sglist(sc);

	if (!sgpe || !sgde) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
				"9082 Invalid s/g entry: data=x%px prot=x%px\n",
				sgpe, sgde);
		return 0;
	}

	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract some info from the scsi command */
	blksize = lpfc_cmd_blksize(sc);
	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
		if (rc & BG_ERR_CHECK)
			checking = 0;
	}
#endif

	split_offset = 0;
	do {
		/* Check to see if we ran out of space */
		if ((num_sge >= (phba->cfg_total_seg_cnt - 2)) &&
		    !(phba->cfg_xpsgl))
			return num_sge + 3;

		/* DISEED and DIF have to be together */
		if (!((j + 1) % phba->border_sge_num) ||
		    !((j + 2) % phba->border_sge_num) ||
		    !((j + 3) % phba->border_sge_num)) {
			sgl->word2 = 0;

			/* set LSP type */
			bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP);

			sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd);

			if (unlikely(!sgl_xtra)) {
				goto out;
			} else {
				sgl->addr_lo = cpu_to_le32(putPaddrLow(
						sgl_xtra->dma_phys_sgl));
				sgl->addr_hi = cpu_to_le32(putPaddrHigh(
						sgl_xtra->dma_phys_sgl));
			}

			sgl->word2 = cpu_to_le32(sgl->word2);
			sgl->sge_len = cpu_to_le32(phba->cfg_sg_dma_buf_size);

			sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
			j = 0;
		}

		/* setup DISEED with what we have */
		diseed = (struct sli4_sge_diseed *) sgl;
		memset(diseed, 0, sizeof(struct sli4_sge_diseed));
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);

		/* Endianness conversion if necessary */
		diseed->ref_tag = cpu_to_le32(reftag);
		diseed->ref_tag_tran = diseed->ref_tag;

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD)) {
			bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);

		} else {
			bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
			/*
			 * When in this mode, the hardware will replace
			 * the guard tag from the host with a
			 * newly generated good CRC for the wire.
			 * Switch to raw mode here to avoid this
			 * behavior. What the host sends gets put on the wire.
			 */
			if (txop == BG_OP_IN_CRC_OUT_CRC) {
				txop = BG_OP_RAW_MODE;
				rxop = BG_OP_RAW_MODE;
			}
		}

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
			bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
		else
			bf_set(lpfc_sli4_sge_dif_re, diseed, 0);

		/* setup DISEED with the rest of the info */
		bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
		bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);

		bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
		bf_set(lpfc_sli4_sge_dif_me, diseed, 0);

		/* Endianness conversion if necessary for DISEED */
		diseed->word2 = cpu_to_le32(diseed->word2);
		diseed->word3 = cpu_to_le32(diseed->word3);

		/* advance sgl and increment sge count */
		num_sge++;

		sgl++;
		j++;

		/* setup the first SGE that points to protection buffer */
		protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
		protgroup_len = sg_dma_len(sgpe) - protgroup_offset;

		/* must be integer multiple of the DIF block length */
		BUG_ON(protgroup_len % 8);

		/* Now setup DIF SGE */
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DIF);
		sgl->addr_hi = le32_to_cpu(putPaddrHigh(protphysaddr));
		sgl->addr_lo = le32_to_cpu(putPaddrLow(protphysaddr));
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = 0;

		protgrp_blks = protgroup_len / 8;
		protgrp_bytes = protgrp_blks * blksize;

		/* check if DIF SGE is crossing the 4K boundary; if so split */
		if ((sgl->addr_lo & 0xfff) + protgroup_len > 0x1000) {
			protgroup_remainder = 0x1000 - (sgl->addr_lo & 0xfff);
			protgroup_offset += protgroup_remainder;
			protgrp_blks = protgroup_remainder / 8;
			protgrp_bytes = protgrp_blks * blksize;
		} else {
			protgroup_offset = 0;
			curr_prot++;
		}

		num_sge++;

		/* setup SGE's for data blocks associated with DIF data */
		pgdone = 0;
		subtotal = 0; /* total bytes processed for current prot grp */

		sgl++;
		j++;

		while (!pgdone) {
			/* Check to see if we ran out of space */
			if ((num_sge >= phba->cfg_total_seg_cnt) &&
			    !phba->cfg_xpsgl)
				return num_sge + 1;

			if (!sgde) {
				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9086 BLKGRD:%s Invalid data segment\n",
						__func__);
				return 0;
			}

			if (!((j + 1) % phba->border_sge_num)) {
				sgl->word2 = 0;

				/* set LSP type */
				bf_set(lpfc_sli4_sge_type, sgl,
				       LPFC_SGE_TYPE_LSP);

				sgl_xtra = lpfc_get_sgl_per_hdwq(phba,
								 lpfc_cmd);

				if (unlikely(!sgl_xtra)) {
					goto out;
				} else {
					sgl->addr_lo = cpu_to_le32(
					  putPaddrLow(sgl_xtra->dma_phys_sgl));
					sgl->addr_hi = cpu_to_le32(
					  putPaddrHigh(sgl_xtra->dma_phys_sgl));
				}

				sgl->word2 = cpu_to_le32(sgl->word2);
				sgl->sge_len = cpu_to_le32(
						phba->cfg_sg_dma_buf_size);

				sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
			} else {
				dataphysaddr = sg_dma_address(sgde) +
					       split_offset;

				remainder = sg_dma_len(sgde) - split_offset;

				if ((subtotal + remainder) <= protgrp_bytes) {
					/* we can use this whole buffer */
					dma_len = remainder;
					split_offset = 0;

					if ((subtotal + remainder) ==
								protgrp_bytes)
						pgdone = 1;
				} else {
					/*
					 * must split this buffer with next
					 * prot grp
					 */
					dma_len = protgrp_bytes - subtotal;
					split_offset += dma_len;
				}

				subtotal += dma_len;

				sgl->word2 = 0;
				sgl->addr_lo = cpu_to_le32(putPaddrLow(
								dataphysaddr));
				sgl->addr_hi = cpu_to_le32(putPaddrHigh(
								dataphysaddr));
				bf_set(lpfc_sli4_sge_last, sgl, 0);
				bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
				bf_set(lpfc_sli4_sge_type, sgl,
				       LPFC_SGE_TYPE_DATA);

				sgl->sge_len = cpu_to_le32(dma_len);
				dma_offset += dma_len;

				num_sge++;
				curr_data++;

				if (split_offset) {
					sgl++;
					j++;
					break;
				}

				/* Move to the next s/g segment if possible */
				sgde = sg_next(sgde);

				sgl++;
			}

			j++;
		}

		if (protgroup_offset) {
			/* update the reference tag */
			reftag += protgrp_blks;
			continue;
		}

		/* are we done ? */
		if (curr_prot == protcnt) {
			/* mark the last SGE in the last protection group */
			sgl--;
			bf_set(lpfc_sli4_sge_last, sgl, 1);
			alldone = 1;
		} else if (curr_prot < protcnt) {
			/* advance to next prot buffer */
			sgpe = sg_next(sgpe);

			/* update the reference tag */
			reftag += protgrp_blks;
		} else {
			/* if we're here, we have a bug */
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9085 BLKGRD: bug in %s\n", __func__);
		}

	} while (!alldone);

out:

	return num_sge;
}

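/**
 * lpfc_prot_group_type - Get protection group type of a SCSI command
 * @phba: The Hba for which this call is being executed.
 * @sc: pointer to scsi command we're working on
 *
 * Given a SCSI command that supports DIF, determine the composition of
 * protection groups involved in setting up buffer lists.
 *
 * Returns: Protection group type (with or without DIF)
 **/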
static int
lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
{
	int ret = LPFC_PG_TYPE_INVALID;
	unsigned char op = scsi_get_prot_op(sc);

	switch (op) {
	case SCSI_PROT_READ_STRIP:
	case SCSI_PROT_WRITE_INSERT:
		ret = LPFC_PG_TYPE_NO_DIF;
		break;
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		ret = LPFC_PG_TYPE_DIF_BUF;
		break;
	default:
		if (phba)
			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
					"9021 Unsupported protection op:%d\n",
					op);
		break;
	}
	return ret;
}

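/**
 * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be adjusted.
 *
 * Adjust the data length to account for how much data is actually on the
 * wire once the DIF tuples are included.
 *
 * Returns the adjusted data length.
 **/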
static int
lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
		       struct lpfc_io_buf *lpfc_cmd)
{
	struct scsi_cmnd *sc = lpfc_cmd->pCmd;
	int fcpdl;

	fcpdl = scsi_bufflen(sc);

	/* Check if there is protection data on the wire */
	if (sc->sc_data_direction == DMA_FROM_DEVICE) {
		/* Read check for protection data */
		if (scsi_get_prot_op(sc) == SCSI_PROT_READ_INSERT)
			return fcpdl;

	} else {
		/* Write check for protection data */
		if (scsi_get_prot_op(sc) == SCSI_PROT_WRITE_STRIP)
			return fcpdl;
	}

	/*
	 * If we are in DIF Type 1 mode every data block has an 8 byte
	 * DIF (trailer) attached to it. Must adjust the FCP data length
	 * to account for the protection data.
	 */
	fcpdl += (fcpdl / lpfc_cmd_blksize(sc)) * 8;

	return fcpdl;
}

2504
2505
2506
2507
2508
2509
2510
2511
2512
2513
2514
2515
2516
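/**
 * lpfc_bg_scsi_prep_dma_buf_s3 - DMA mapping for SLI-3 BlockGuard I/O
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * DMA maps the data scatterlist and, for DIF_BUF type ops, the
 * protection scatterlist, builds the BPL with the appropriate
 * protection BDEs, then fills in the adjusted fcpDl and the read
 * check parameter.
 *
 * Return codes:
 *   0 - success
 *   1 - DMA mapping failed, retry the I/O later
 *   2 - segment count exceeded what the BPL can hold, fail the I/O
 **/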
2517 static int
2518 lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
2519 struct lpfc_io_buf *lpfc_cmd)
2520 {
2521 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
2522 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
2523 struct ulp_bde64 *bpl = (struct ulp_bde64 *)lpfc_cmd->dma_sgl;
2524 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
2525 uint32_t num_bde = 0;
2526 int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
2527 int prot_group_type = 0;
2528 int fcpdl;
2529 int ret = 1;
2530 struct lpfc_vport *vport = phba->pport;
2531
2532
2533
2534
2535
2536 bpl += 2;
2537 if (scsi_sg_count(scsi_cmnd)) {
2538
2539
2540
2541
2542
2543
2544 datasegcnt = dma_map_sg(&phba->pcidev->dev,
2545 scsi_sglist(scsi_cmnd),
2546 scsi_sg_count(scsi_cmnd), datadir);
2547 if (unlikely(!datasegcnt))
2548 return 1;
2549
2550 lpfc_cmd->seg_cnt = datasegcnt;
2551
2552
2553 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
2554 WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
2555 ret = 2;
2556 goto err;
2557 }
2558
2559 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
2560
2561 switch (prot_group_type) {
2562 case LPFC_PG_TYPE_NO_DIF:
2563
2564
2565 if ((lpfc_cmd->seg_cnt + 2) > phba->cfg_total_seg_cnt) {
2566 ret = 2;
2567 goto err;
2568 }
2569
2570 num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
2571 datasegcnt);
2572
2573 if (num_bde < 2) {
2574 ret = 2;
2575 goto err;
2576 }
2577 break;
2578
2579 case LPFC_PG_TYPE_DIF_BUF:
2580
2581
2582
2583
2584
2585 protsegcnt = dma_map_sg(&phba->pcidev->dev,
2586 scsi_prot_sglist(scsi_cmnd),
2587 scsi_prot_sg_count(scsi_cmnd), datadir);
2588 if (unlikely(!protsegcnt)) {
2589 scsi_dma_unmap(scsi_cmnd);
2590 return 1;
2591 }
2592
2593 lpfc_cmd->prot_seg_cnt = protsegcnt;
2594
2595
2596
2597
2598
2599 if ((lpfc_cmd->prot_seg_cnt * 4) >
2600 (phba->cfg_total_seg_cnt - 2)) {
2601 ret = 2;
2602 goto err;
2603 }
2604
2605 num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
2606 datasegcnt, protsegcnt);
2607
2608 if ((num_bde < 3) ||
2609 (num_bde > phba->cfg_total_seg_cnt)) {
2610 ret = 2;
2611 goto err;
2612 }
2613 break;
2614
2615 case LPFC_PG_TYPE_INVALID:
2616 default:
2617 scsi_dma_unmap(scsi_cmnd);
2618 lpfc_cmd->seg_cnt = 0;
2619
2620 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
2621 "9022 Unexpected protection group %i\n",
2622 prot_group_type);
2623 return 2;
2624 }
2625 }
2626
2627
2628
2629
2630
2631
2632
2633 iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
2634 iocb_cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof(struct ulp_bde64));
2635 iocb_cmd->ulpBdeCount = 1;
2636 iocb_cmd->ulpLe = 1;
2637
2638 fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
2639 fcp_cmnd->fcpDl = cpu_to_be32(fcpdl);
2640
2641
2642
2643
2644
2645 iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
2646
2647
2648
2649
2650
2651 if (iocb_cmd->un.fcpi.fcpi_XRdy &&
2652 (fcpdl < vport->cfg_first_burst_size))
2653 iocb_cmd->un.fcpi.fcpi_XRdy = fcpdl;
2654
2655 return 0;
2656 err:
2657 if (lpfc_cmd->seg_cnt)
2658 scsi_dma_unmap(scsi_cmnd);
2659 if (lpfc_cmd->prot_seg_cnt)
2660 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
2661 scsi_prot_sg_count(scsi_cmnd),
2662 scsi_cmnd->sc_data_direction);
2663
2664 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
2665 "9023 Cannot setup S/G List for HBA"
2666 "IO segs %d/%d BPL %d SCSI %d: %d %d\n",
2667 lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
2668 phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
2669 prot_group_type, num_bde);
2670
2671 lpfc_cmd->seg_cnt = 0;
2672 lpfc_cmd->prot_seg_cnt = 0;
2673 return ret;
2674 }
2675
2676
2677
2678
2679
2680
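/**
 * lpfc_bg_crc - Compute the T10-DIF CRC guard for a buffer
 * @data: pointer to the data to checksum
 * @count: number of bytes to checksum
 *
 * Returns the CRC16 in big-endian (wire) order so it can be compared
 * directly against the guard tag in a DIF tuple.
 **/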
2681 static uint16_t
2682 lpfc_bg_crc(uint8_t *data, int count)
2683 {
2684 uint16_t crc = 0;
2685 uint16_t x;
2686
2687 crc = crc_t10dif(data, count);
2688 x = cpu_to_be16(crc);
2689 return x;
2690 }
2691
2692
2693
2694
2695
2696
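/**
 * lpfc_bg_csum - Compute the IP checksum guard for a buffer
 * @data: pointer to the data to checksum
 * @count: number of bytes to checksum
 *
 * Returns the IP-style checksum used when lpfc_cmd_guard_csum()
 * selects the IP checksum guard instead of the T10-DIF CRC.
 **/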
2697 static uint16_t
2698 lpfc_bg_csum(uint8_t *data, int count)
2699 {
2700 uint16_t ret;
2701
2702 ret = ip_compute_csum(data, count);
2703 return ret;
2704 }
2705
2706
2707
2708
2709
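/**
 * lpfc_calc_bg_err - Recalculate BlockGuard tags to locate an error
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer flagged with a BlockGuard error.
 *
 * Walks the data and protection scatterlists in software, recomputing
 * the guard tag and checking the ref and app tags block by block, and
 * builds the corresponding CHECK CONDITION sense data when a mismatch
 * is found. Called when the HBA reported a BlockGuard error without
 * identifying which tag failed.
 **/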
2710 static void
2711 lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
2712 {
2713 struct scatterlist *sgpe;
2714 struct scatterlist *sgde;
2715 struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
2716 struct scsi_dif_tuple *src = NULL;
2717 uint8_t *data_src = NULL;
2718 uint16_t guard_tag;
2719 uint16_t start_app_tag, app_tag;
2720 uint32_t start_ref_tag, ref_tag;
2721 int prot, protsegcnt;
2722 int err_type, len, data_len;
2723 int chk_ref, chk_app, chk_guard;
2724 uint16_t sum;
2725 unsigned blksize;
2726
2727 err_type = BGS_GUARD_ERR_MASK;
2728 sum = 0;
2729 guard_tag = 0;
2730
2731
2732 prot = scsi_get_prot_op(cmd);
2733 if ((prot == SCSI_PROT_READ_STRIP) ||
2734 (prot == SCSI_PROT_WRITE_INSERT) ||
2735 (prot == SCSI_PROT_NORMAL))
2736 goto out;
2737
2738
2739 chk_ref = 1;
2740 chk_app = 0;
2741 chk_guard = 0;
2742
2743
2744 sgpe = scsi_prot_sglist(cmd);
2745 protsegcnt = lpfc_cmd->prot_seg_cnt;
2746
2747 if (sgpe && protsegcnt) {
2748
2749
2750
2751
2752
2753 sgde = scsi_sglist(cmd);
2754 blksize = lpfc_cmd_blksize(cmd);
2755 data_src = (uint8_t *)sg_virt(sgde);
2756 data_len = sgde->length;
2757 if ((data_len & (blksize - 1)) == 0)
2758 chk_guard = 1;
2759
2760 src = (struct scsi_dif_tuple *)sg_virt(sgpe);
2761 start_ref_tag = (uint32_t)scsi_get_lba(cmd);
2762 start_app_tag = src->app_tag;
2763 len = sgpe->length;
2764 while (src && protsegcnt) {
2765 while (len) {
2766
2767
2768
2769
2770
2771 if ((src->ref_tag == T10_PI_REF_ESCAPE) ||
2772 (src->app_tag == T10_PI_APP_ESCAPE)) {
2773 start_ref_tag++;
2774 goto skipit;
2775 }
2776
2777
2778 if (chk_guard) {
2779 guard_tag = src->guard_tag;
2780 if (lpfc_cmd_guard_csum(cmd))
2781 sum = lpfc_bg_csum(data_src,
2782 blksize);
2783 else
2784 sum = lpfc_bg_crc(data_src,
2785 blksize);
2786 if ((guard_tag != sum)) {
2787 err_type = BGS_GUARD_ERR_MASK;
2788 goto out;
2789 }
2790 }
2791
2792
2793 ref_tag = be32_to_cpu(src->ref_tag);
2794 if (chk_ref && (ref_tag != start_ref_tag)) {
2795 err_type = BGS_REFTAG_ERR_MASK;
2796 goto out;
2797 }
2798 start_ref_tag++;
2799
2800
2801 app_tag = src->app_tag;
2802 if (chk_app && (app_tag != start_app_tag)) {
2803 err_type = BGS_APPTAG_ERR_MASK;
2804 goto out;
2805 }
2806 skipit:
2807 len -= sizeof(struct scsi_dif_tuple);
2808 if (len < 0)
2809 len = 0;
2810 src++;
2811
2812 data_src += blksize;
2813 data_len -= blksize;
2814
2815
2816
2817
2818
2819
2820 if (chk_guard && (data_len == 0)) {
2821 chk_guard = 0;
2822 sgde = sg_next(sgde);
2823 if (!sgde)
2824 goto out;
2825
2826 data_src = (uint8_t *)sg_virt(sgde);
2827 data_len = sgde->length;
2828 if ((data_len & (blksize - 1)) == 0)
2829 chk_guard = 1;
2830 }
2831 }
2832
2833
2834 sgpe = sg_next(sgpe);
2835 if (sgpe) {
2836 src = (struct scsi_dif_tuple *)sg_virt(sgpe);
2837 len = sgpe->length;
2838 } else {
2839 src = NULL;
2840 }
2841 protsegcnt--;
2842 }
2843 }
2844 out:
2845 if (err_type == BGS_GUARD_ERR_MASK) {
2846 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2847 0x10, 0x1);
2848 cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
2849 SAM_STAT_CHECK_CONDITION;
2850 phba->bg_guard_err_cnt++;
2851 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2852 "9069 BLKGRD: LBA %lx grd_tag error %x != %x\n",
2853 (unsigned long)scsi_get_lba(cmd),
2854 sum, guard_tag);
2855
2856 } else if (err_type == BGS_REFTAG_ERR_MASK) {
2857 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2858 0x10, 0x3);
2859 cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
2860 SAM_STAT_CHECK_CONDITION;
2861
2862 phba->bg_reftag_err_cnt++;
2863 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2864 "9066 BLKGRD: LBA %lx ref_tag error %x != %x\n",
2865 (unsigned long)scsi_get_lba(cmd),
2866 ref_tag, start_ref_tag);
2867
2868 } else if (err_type == BGS_APPTAG_ERR_MASK) {
2869 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2870 0x10, 0x2);
2871 cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
2872 SAM_STAT_CHECK_CONDITION;
2873
2874 phba->bg_apptag_err_cnt++;
2875 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2876 "9041 BLKGRD: LBA %lx app_tag error %x != %x\n",
2877 (unsigned long)scsi_get_lba(cmd),
2878 app_tag, start_app_tag);
2879 }
2880 }
2881
2882
2883
2884
2885
2886
2887
2888
2889
2890
2891
2892
2893
2894
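/**
 * lpfc_parse_bg_err - Decode a BlockGuard error reported by the HBA
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer flagged with a BlockGuard error.
 * @pIocbOut: The response IOCB carrying the bgstat/bghm fields.
 *
 * Translates guard, ref tag and app tag errors into CHECK CONDITION
 * sense data, computes the failing sector from the high water mark,
 * and falls back to lpfc_calc_bg_err() when the status bits do not
 * identify the error.
 *
 * Return codes:
 *  -1 - invalid BG profile or uninitialized DIF block, I/O failed
 *   0 - no recognized error bit was set
 *   1 - a guard, ref tag or app tag error was translated to sense data
 **/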
2895 static int
2896 lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
2897 struct lpfc_iocbq *pIocbOut)
2898 {
2899 struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
2900 struct sli3_bg_fields *bgf = &pIocbOut->iocb.unsli3.sli3_bg;
2901 int ret = 0;
2902 uint32_t bghm = bgf->bghm;
2903 uint32_t bgstat = bgf->bgstat;
2904 uint64_t failing_sector = 0;
2905
2906 if (lpfc_bgs_get_invalid_prof(bgstat)) {
2907 cmd->result = DID_ERROR << 16;
2908 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2909 "9072 BLKGRD: Invalid BG Profile in cmd"
2910 " 0x%x lba 0x%llx blk cnt 0x%x "
2911 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
2912 (unsigned long long)scsi_get_lba(cmd),
2913 blk_rq_sectors(cmd->request), bgstat, bghm);
2914 ret = (-1);
2915 goto out;
2916 }
2917
2918 if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
2919 cmd->result = DID_ERROR << 16;
2920 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2921 "9073 BLKGRD: Invalid BG PDIF Block in cmd"
2922 " 0x%x lba 0x%llx blk cnt 0x%x "
2923 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
2924 (unsigned long long)scsi_get_lba(cmd),
2925 blk_rq_sectors(cmd->request), bgstat, bghm);
2926 ret = (-1);
2927 goto out;
2928 }
2929
2930 if (lpfc_bgs_get_guard_err(bgstat)) {
2931 ret = 1;
2932
2933 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2934 0x10, 0x1);
2935 cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
2936 SAM_STAT_CHECK_CONDITION;
2937 phba->bg_guard_err_cnt++;
2938 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2939 "9055 BLKGRD: Guard Tag error in cmd"
2940 " 0x%x lba 0x%llx blk cnt 0x%x "
2941 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
2942 (unsigned long long)scsi_get_lba(cmd),
2943 blk_rq_sectors(cmd->request), bgstat, bghm);
2944 }
2945
2946 if (lpfc_bgs_get_reftag_err(bgstat)) {
2947 ret = 1;
2948
2949 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2950 0x10, 0x3);
2951 cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
2952 SAM_STAT_CHECK_CONDITION;
2953
2954 phba->bg_reftag_err_cnt++;
2955 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2956 "9056 BLKGRD: Ref Tag error in cmd"
2957 " 0x%x lba 0x%llx blk cnt 0x%x "
2958 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
2959 (unsigned long long)scsi_get_lba(cmd),
2960 blk_rq_sectors(cmd->request), bgstat, bghm);
2961 }
2962
2963 if (lpfc_bgs_get_apptag_err(bgstat)) {
2964 ret = 1;
2965
2966 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2967 0x10, 0x2);
2968 cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
2969 SAM_STAT_CHECK_CONDITION;
2970
2971 phba->bg_apptag_err_cnt++;
2972 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2973 "9061 BLKGRD: App Tag error in cmd"
2974 " 0x%x lba 0x%llx blk cnt 0x%x "
2975 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
2976 (unsigned long long)scsi_get_lba(cmd),
2977 blk_rq_sectors(cmd->request), bgstat, bghm);
2978 }
2979
2980 if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
2981
2982
2983
2984
2985
2986
2987 cmd->sense_buffer[7] = 0xc;
2988 cmd->sense_buffer[8] = 0;
2989 cmd->sense_buffer[9] = 0xa;
2990 cmd->sense_buffer[10] = 0x80;
2991
2992
2993 switch (scsi_get_prot_op(cmd)) {
2994 case SCSI_PROT_READ_INSERT:
2995 case SCSI_PROT_WRITE_STRIP:
2996 bghm /= cmd->device->sector_size;
2997 break;
2998 case SCSI_PROT_READ_STRIP:
2999 case SCSI_PROT_WRITE_INSERT:
3000 case SCSI_PROT_READ_PASS:
3001 case SCSI_PROT_WRITE_PASS:
3002 bghm /= (cmd->device->sector_size +
3003 sizeof(struct scsi_dif_tuple));
3004 break;
3005 }
3006
3007 failing_sector = scsi_get_lba(cmd);
3008 failing_sector += bghm;
3009
3010
3011 put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]);
3012 }
3013
3014 if (!ret) {
3015
3016 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3017 "9057 BLKGRD: Unknown error in cmd"
3018 " 0x%x lba 0x%llx blk cnt 0x%x "
3019 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3020 (unsigned long long)scsi_get_lba(cmd),
3021 blk_rq_sectors(cmd->request), bgstat, bghm);
3022
3023
3024 lpfc_calc_bg_err(phba, lpfc_cmd);
3025 }
3026 out:
3027 return ret;
3028 }
3029
3030
3031
3032
3033
3034
3035
3036
3037
3038
3039
3040
3041
3042
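/**
 * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for SLI-4 SCSI I/O
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * DMA maps the data scatterlist and converts each segment into an SGE
 * in the command's SGL, chaining in extra per-hdwq SGL buffers via
 * link (LSP) entries when the embedded SGL is exhausted. Also sets up
 * the performance-hint BDE and the OAS flags when configured.
 *
 * Return codes:
 *   0 - success
 *   1 - DMA mapping or extra SGL allocation failed, retry later
 *   2 - too many segments for the configured SGL, fail the I/O
 **/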
3043 static int
3044 lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
3045 {
3046 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
3047 struct scatterlist *sgel = NULL;
3048 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
3049 struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
3050 struct sli4_sge *first_data_sgl;
3051 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
3052 dma_addr_t physaddr;
3053 uint32_t num_bde = 0;
3054 uint32_t dma_len;
3055 uint32_t dma_offset = 0;
3056 int nseg, i, j;
3057 struct ulp_bde64 *bde;
3058 bool lsp_just_set = false;
3059 struct sli4_hybrid_sgl *sgl_xtra = NULL;
3060
3061
3062
3063
3064
3065
3066
3067 if (scsi_sg_count(scsi_cmnd)) {
3068
3069
3070
3071
3072
3073
3074
3075 nseg = scsi_dma_map(scsi_cmnd);
3076 if (unlikely(nseg <= 0))
3077 return 1;
3078 sgl += 1;
3079
3080 sgl->word2 = le32_to_cpu(sgl->word2);
3081 bf_set(lpfc_sli4_sge_last, sgl, 0);
3082 sgl->word2 = cpu_to_le32(sgl->word2);
3083 sgl += 1;
3084 first_data_sgl = sgl;
3085 lpfc_cmd->seg_cnt = nseg;
3086 if (!phba->cfg_xpsgl &&
3087 lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
3088 lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9074 BLKGRD:"
3089 " %s: Too many sg segments from "
3090 "dma_map_sg. Config %d, seg_cnt %d\n",
3091 __func__, phba->cfg_sg_seg_cnt,
3092 lpfc_cmd->seg_cnt);
3093 WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
3094 lpfc_cmd->seg_cnt = 0;
3095 scsi_dma_unmap(scsi_cmnd);
3096 return 2;
3097 }
3098
3099
3100
3101
3102
3103
3104
3105
3106
3107
3108
3109
3110 sgel = scsi_sglist(scsi_cmnd);
3111 j = 2;
3112 for (i = 0; i < nseg; i++) {
3113 sgl->word2 = 0;
3114 if ((num_bde + 1) == nseg) {
3115 bf_set(lpfc_sli4_sge_last, sgl, 1);
3116 bf_set(lpfc_sli4_sge_type, sgl,
3117 LPFC_SGE_TYPE_DATA);
3118 } else {
3119 bf_set(lpfc_sli4_sge_last, sgl, 0);
3120
3121
3122 if (!lsp_just_set &&
3123 !((j + 1) % phba->border_sge_num) &&
3124 ((nseg - 1) != i)) {
3125
3126 bf_set(lpfc_sli4_sge_type, sgl,
3127 LPFC_SGE_TYPE_LSP);
3128
3129 sgl_xtra = lpfc_get_sgl_per_hdwq(
3130 phba, lpfc_cmd);
3131
3132 if (unlikely(!sgl_xtra)) {
3133 lpfc_cmd->seg_cnt = 0;
3134 scsi_dma_unmap(scsi_cmnd);
3135 return 1;
3136 }
3137 sgl->addr_lo = cpu_to_le32(putPaddrLow(
3138 sgl_xtra->dma_phys_sgl));
3139 sgl->addr_hi = cpu_to_le32(putPaddrHigh(
3140 sgl_xtra->dma_phys_sgl));
3141
3142 } else {
3143 bf_set(lpfc_sli4_sge_type, sgl,
3144 LPFC_SGE_TYPE_DATA);
3145 }
3146 }
3147
3148 if (!(bf_get(lpfc_sli4_sge_type, sgl) &
3149 LPFC_SGE_TYPE_LSP)) {
3150 if ((nseg - 1) == i)
3151 bf_set(lpfc_sli4_sge_last, sgl, 1);
3152
3153 physaddr = sg_dma_address(sgel);
3154 dma_len = sg_dma_len(sgel);
3155 sgl->addr_lo = cpu_to_le32(putPaddrLow(
3156 physaddr));
3157 sgl->addr_hi = cpu_to_le32(putPaddrHigh(
3158 physaddr));
3159
3160 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
3161 sgl->word2 = cpu_to_le32(sgl->word2);
3162 sgl->sge_len = cpu_to_le32(dma_len);
3163
3164 dma_offset += dma_len;
3165 sgel = sg_next(sgel);
3166
3167 sgl++;
3168 lsp_just_set = false;
3169
3170 } else {
3171 sgl->word2 = cpu_to_le32(sgl->word2);
3172 sgl->sge_len = cpu_to_le32(
3173 phba->cfg_sg_dma_buf_size);
3174
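/* Hop to the chained SGL buffer and redo this data segment there */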
3175 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
3176 i = i - 1;
3177
3178 lsp_just_set = true;
3179 }
3180
3181 j++;
3182 }
3183
3184
3185
3186
3187
3188 if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) ||
3189 phba->cfg_enable_pbde) {
3190 bde = (struct ulp_bde64 *)
3191 &(iocb_cmd->unsli3.sli3Words[5]);
3192 bde->addrLow = first_data_sgl->addr_lo;
3193 bde->addrHigh = first_data_sgl->addr_hi;
3194 bde->tus.f.bdeSize =
3195 le32_to_cpu(first_data_sgl->sge_len);
3196 bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
3197 bde->tus.w = cpu_to_le32(bde->tus.w);
3198 }
3199 } else {
3200 sgl += 1;
3201
3202 sgl->word2 = le32_to_cpu(sgl->word2);
3203 bf_set(lpfc_sli4_sge_last, sgl, 1);
3204 sgl->word2 = cpu_to_le32(sgl->word2);
3205
3206 if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) ||
3207 phba->cfg_enable_pbde) {
3208 bde = (struct ulp_bde64 *)
3209 &(iocb_cmd->unsli3.sli3Words[5]);
3210 memset(bde, 0, (sizeof(uint32_t) * 3));
3211 }
3212 }
3213
3214
3215
3216
3217
3218
3219
3220 fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
3221
3222
3223
3224
3225
3226 iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
3227
3228
3229
3230
3231
3232 if ((phba->cfg_fof) && ((struct lpfc_device_data *)
3233 scsi_cmnd->device->hostdata)->oas_enabled) {
3234 lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
3235 lpfc_cmd->cur_iocbq.priority = ((struct lpfc_device_data *)
3236 scsi_cmnd->device->hostdata)->priority;
3237 }
3238
3239 return 0;
3240 }
3241
3242
3243
3244
3245
3246
3247
3248
3249
3250
3251
3252
3253
3254
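/**
 * lpfc_bg_scsi_prep_dma_buf_s4 - DMA mapping for SLI-4 BlockGuard I/O
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * DMA maps the data and, for DIF_BUF type ops, the protection
 * scatterlists and builds the protection SGL, marks the IOCB with the
 * matching DIF strip/insert/pass flag, and fills in the adjusted
 * fcpDl and the read check parameter.
 *
 * Return codes:
 *   0 - success
 *   1 - DMA mapping failed, retry the I/O later
 *   2 - segment count exceeded what the SGL can hold, fail the I/O
 **/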
3255 static int
3256 lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
3257 struct lpfc_io_buf *lpfc_cmd)
3258 {
3259 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
3260 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
3261 struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->dma_sgl);
3262 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
3263 uint32_t num_sge = 0;
3264 int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
3265 int prot_group_type = 0;
3266 int fcpdl;
3267 int ret = 1;
3268 struct lpfc_vport *vport = phba->pport;
3269
3270
3271
3272
3273
3274 if (scsi_sg_count(scsi_cmnd)) {
3275
3276
3277
3278
3279
3280
3281 datasegcnt = dma_map_sg(&phba->pcidev->dev,
3282 scsi_sglist(scsi_cmnd),
3283 scsi_sg_count(scsi_cmnd), datadir);
3284 if (unlikely(!datasegcnt))
3285 return 1;
3286
3287 sgl += 1;
3288
3289 sgl->word2 = le32_to_cpu(sgl->word2);
3290 bf_set(lpfc_sli4_sge_last, sgl, 0);
3291 sgl->word2 = cpu_to_le32(sgl->word2);
3292
3293 sgl += 1;
3294 lpfc_cmd->seg_cnt = datasegcnt;
3295
3296
3297 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt &&
3298 !phba->cfg_xpsgl) {
3299 WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
3300 ret = 2;
3301 goto err;
3302 }
3303
3304 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
3305
3306 switch (prot_group_type) {
3307 case LPFC_PG_TYPE_NO_DIF:
3308
3309 if (((lpfc_cmd->seg_cnt + 1) >
3310 phba->cfg_total_seg_cnt) &&
3311 !phba->cfg_xpsgl) {
3312 ret = 2;
3313 goto err;
3314 }
3315
3316 num_sge = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl,
3317 datasegcnt, lpfc_cmd);
3318
3319
3320 if (num_sge < 2) {
3321 ret = 2;
3322 goto err;
3323 }
3324 break;
3325
3326 case LPFC_PG_TYPE_DIF_BUF:
3327
3328
3329
3330
3331
3332 protsegcnt = dma_map_sg(&phba->pcidev->dev,
3333 scsi_prot_sglist(scsi_cmnd),
3334 scsi_prot_sg_count(scsi_cmnd), datadir);
3335 if (unlikely(!protsegcnt)) {
3336 scsi_dma_unmap(scsi_cmnd);
3337 return 1;
3338 }
3339
3340 lpfc_cmd->prot_seg_cnt = protsegcnt;
3341
3342
3343
3344
3345 if (((lpfc_cmd->prot_seg_cnt * 3) >
3346 (phba->cfg_total_seg_cnt - 2)) &&
3347 !phba->cfg_xpsgl) {
3348 ret = 2;
3349 goto err;
3350 }
3351
3352 num_sge = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl,
3353 datasegcnt, protsegcnt, lpfc_cmd);
3354
3355
3356 if (num_sge < 3 ||
3357 (num_sge > phba->cfg_total_seg_cnt &&
3358 !phba->cfg_xpsgl)) {
3359 ret = 2;
3360 goto err;
3361 }
3362 break;
3363
3364 case LPFC_PG_TYPE_INVALID:
3365 default:
3366 scsi_dma_unmap(scsi_cmnd);
3367 lpfc_cmd->seg_cnt = 0;
3368
3369 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
3370 "9083 Unexpected protection group %i\n",
3371 prot_group_type);
3372 return 2;
3373 }
3374 }
3375
3376 switch (scsi_get_prot_op(scsi_cmnd)) {
3377 case SCSI_PROT_WRITE_STRIP:
3378 case SCSI_PROT_READ_STRIP:
3379 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_STRIP;
3380 break;
3381 case SCSI_PROT_WRITE_INSERT:
3382 case SCSI_PROT_READ_INSERT:
3383 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_INSERT;
3384 break;
3385 case SCSI_PROT_WRITE_PASS:
3386 case SCSI_PROT_READ_PASS:
3387 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_PASS;
3388 break;
3389 }
3390
3391 fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
3392 fcp_cmnd->fcpDl = cpu_to_be32(fcpdl);
3393
3394
3395
3396
3397
3398 iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
3399
3400
3401
3402
3403
3404 if (iocb_cmd->un.fcpi.fcpi_XRdy &&
3405 (fcpdl < vport->cfg_first_burst_size))
3406 iocb_cmd->un.fcpi.fcpi_XRdy = fcpdl;
3407
3408
3409
3410
3411
3412 if ((phba->cfg_fof) && ((struct lpfc_device_data *)
3413 scsi_cmnd->device->hostdata)->oas_enabled)
3414 lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
3415
3416 return 0;
3417 err:
3418 if (lpfc_cmd->seg_cnt)
3419 scsi_dma_unmap(scsi_cmnd);
3420 if (lpfc_cmd->prot_seg_cnt)
3421 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
3422 scsi_prot_sg_count(scsi_cmnd),
3423 scsi_cmnd->sc_data_direction);
3424
3425 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
3426 "9084 Cannot setup S/G List for HBA"
3427 "IO segs %d/%d SGL %d SCSI %d: %d %d\n",
3428 lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
3429 phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
3430 prot_group_type, num_sge);
3431
3432 lpfc_cmd->seg_cnt = 0;
3433 lpfc_cmd->prot_seg_cnt = 0;
3434 return ret;
3435 }
3436
3437
3438
3439
3440
3441
3442
3443
3444
3445
3446
3447
3448
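/**
 * lpfc_scsi_prep_dma_buf - Wrapper for the SLI-specific DMA mapping routine
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * Dispatches to the s3 or s4 routine installed by
 * lpfc_scsi_api_table_setup().
 **/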
3449 static inline int
3450 lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
3451 {
3452 return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
3453 }
3454
3455
3456
3457
3458
3459
3460
3461
3462
3463
3464
3465
3466
3467
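/**
 * lpfc_bg_scsi_prep_dma_buf - Wrapper for the BlockGuard DMA mapping routine
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * Dispatches to the s3 or s4 BlockGuard routine installed by
 * lpfc_scsi_api_table_setup().
 **/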
3468 static inline int
3469 lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
3470 {
3471 return phba->lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
3472 }
3473
3474
3475
3476
3477
3478
3479
3480
3481
3482
3483
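/**
 * lpfc_send_scsi_error_event - Post a SCSI error event to the worker thread
 * @phba: Pointer to hba context object.
 * @vport: Pointer to vport object.
 * @lpfc_cmd: Pointer to the scsi buffer which failed.
 * @rsp_iocb: The response IOCB for the failed command.
 *
 * Allocates a fast-path event for queue-full, device-busy,
 * check-condition and read-check errors and queues it to the worker
 * thread for delivery as a driver event.
 **/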
3484 static void
3485 lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
3486 struct lpfc_io_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb) {
3487 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
3488 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
3489 uint32_t resp_info = fcprsp->rspStatus2;
3490 uint32_t scsi_status = fcprsp->rspStatus3;
3491 uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
3492 struct lpfc_fast_path_event *fast_path_evt = NULL;
3493 struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
3494 unsigned long flags;
3495
3496 if (!pnode || !NLP_CHK_NODE_ACT(pnode))
3497 return;
3498
3499
3500 if ((cmnd->result == SAM_STAT_TASK_SET_FULL) ||
3501 (cmnd->result == SAM_STAT_BUSY)) {
3502 fast_path_evt = lpfc_alloc_fast_evt(phba);
3503 if (!fast_path_evt)
3504 return;
3505 fast_path_evt->un.scsi_evt.event_type =
3506 FC_REG_SCSI_EVENT;
3507 fast_path_evt->un.scsi_evt.subcategory =
3508 (cmnd->result == SAM_STAT_TASK_SET_FULL) ?
3509 LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY;
3510 fast_path_evt->un.scsi_evt.lun = cmnd->device->lun;
3511 memcpy(&fast_path_evt->un.scsi_evt.wwpn,
3512 &pnode->nlp_portname, sizeof(struct lpfc_name));
3513 memcpy(&fast_path_evt->un.scsi_evt.wwnn,
3514 &pnode->nlp_nodename, sizeof(struct lpfc_name));
3515 } else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen &&
3516 ((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) {
3517 fast_path_evt = lpfc_alloc_fast_evt(phba);
3518 if (!fast_path_evt)
3519 return;
3520 fast_path_evt->un.check_cond_evt.scsi_event.event_type =
3521 FC_REG_SCSI_EVENT;
3522 fast_path_evt->un.check_cond_evt.scsi_event.subcategory =
3523 LPFC_EVENT_CHECK_COND;
3524 fast_path_evt->un.check_cond_evt.scsi_event.lun =
3525 cmnd->device->lun;
3526 memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn,
3527 &pnode->nlp_portname, sizeof(struct lpfc_name));
3528 memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn,
3529 &pnode->nlp_nodename, sizeof(struct lpfc_name));
3530 fast_path_evt->un.check_cond_evt.sense_key =
3531 cmnd->sense_buffer[2] & 0xf;
3532 fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12];
3533 fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13];
3534 } else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
3535 fcpi_parm &&
3536 ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) ||
3537 ((scsi_status == SAM_STAT_GOOD) &&
3538 !(resp_info & (RESID_UNDER | RESID_OVER))))) {
3539
3540
3541
3542
3543 fast_path_evt = lpfc_alloc_fast_evt(phba);
3544 if (!fast_path_evt)
3545 return;
3546 fast_path_evt->un.read_check_error.header.event_type =
3547 FC_REG_FABRIC_EVENT;
3548 fast_path_evt->un.read_check_error.header.subcategory =
3549 LPFC_EVENT_FCPRDCHKERR;
3550 memcpy(&fast_path_evt->un.read_check_error.header.wwpn,
3551 &pnode->nlp_portname, sizeof(struct lpfc_name));
3552 memcpy(&fast_path_evt->un.read_check_error.header.wwnn,
3553 &pnode->nlp_nodename, sizeof(struct lpfc_name));
3554 fast_path_evt->un.read_check_error.lun = cmnd->device->lun;
3555 fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0];
3556 fast_path_evt->un.read_check_error.fcpiparam =
3557 fcpi_parm;
3558 } else
3559 return;
3560
3561 fast_path_evt->vport = vport;
3562 spin_lock_irqsave(&phba->hbalock, flags);
3563 list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
3564 spin_unlock_irqrestore(&phba->hbalock, flags);
3565 lpfc_worker_wake_up(phba);
3566 return;
3567 }
3568
3569
3570
3571
3572
3573
3574
3575
3576
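/**
 * lpfc_scsi_unprep_dma_buf - Un-map the DMA mapping of a SCSI command
 * @phba: The HBA for which this call is being executed.
 * @psb: The scsi buffer which is going to be un-mapped.
 *
 * Unmaps the data and protection scatterlists mapped by the
 * prep_dma_buf routines. Must run before the scsi buffer is released.
 **/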
3577 static void
3578 lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
3579 {
3580
3581
3582
3583
3584
3585
3586 if (psb->seg_cnt > 0)
3587 scsi_dma_unmap(psb->pCmd);
3588 if (psb->prot_seg_cnt > 0)
3589 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd),
3590 scsi_prot_sg_count(psb->pCmd),
3591 psb->pCmd->sc_data_direction);
3592 }
3593
3594
3595
3596
3597
3598
3599
3600
3601
3602
3603
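/**
 * lpfc_handle_fcp_err - FCP response error handler
 * @vport: The virtual port for which this call is being executed.
 * @lpfc_cmd: Pointer to lpfc_io_buf data structure.
 * @rsp_iocb: The response IOCB which contains the FCP error.
 *
 * Parses the FCP_RSP: validates the response and sense lengths,
 * copies the sense data, translates underrun, overrun and read-check
 * conditions into a host status, and sets cmnd->result accordingly.
 **/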
3604 static void
3605 lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
3606 struct lpfc_iocbq *rsp_iocb)
3607 {
3608 struct lpfc_hba *phba = vport->phba;
3609 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
3610 struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
3611 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
3612 uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
3613 uint32_t resp_info = fcprsp->rspStatus2;
3614 uint32_t scsi_status = fcprsp->rspStatus3;
3615 uint32_t *lp;
3616 uint32_t host_status = DID_OK;
3617 uint32_t rsplen = 0;
3618 uint32_t fcpDl;
3619 uint32_t logit = LOG_FCP | LOG_FCP_ERROR;
3620
3621
3622
3623
3624
3625
3626
3627 if (fcpcmd->fcpCntl2) {
3628 scsi_status = 0;
3629 goto out;
3630 }
3631
3632 if (resp_info & RSP_LEN_VALID) {
3633 rsplen = be32_to_cpu(fcprsp->rspRspLen);
3634 if (rsplen != 0 && rsplen != 4 && rsplen != 8) {
3635 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3636 "2719 Invalid response length: "
3637 "tgt x%x lun x%llx cmnd x%x rsplen x%x\n",
3638 cmnd->device->id,
3639 cmnd->device->lun, cmnd->cmnd[0],
3640 rsplen);
3641 host_status = DID_ERROR;
3642 goto out;
3643 }
3644 if (fcprsp->rspInfo3 != RSP_NO_FAILURE) {
3645 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3646 "2757 Protocol failure detected during "
3647 "processing of FCP I/O op: "
3648 "tgt x%x lun x%llx cmnd x%x rspInfo3 x%x\n",
3649 cmnd->device->id,
3650 cmnd->device->lun, cmnd->cmnd[0],
3651 fcprsp->rspInfo3);
3652 host_status = DID_ERROR;
3653 goto out;
3654 }
3655 }
3656
3657 if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
3658 uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
3659 if (snslen > SCSI_SENSE_BUFFERSIZE)
3660 snslen = SCSI_SENSE_BUFFERSIZE;
3661
3662 if (resp_info & RSP_LEN_VALID)
3663 rsplen = be32_to_cpu(fcprsp->rspRspLen);
3664 memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
3665 }
3666 lp = (uint32_t *)cmnd->sense_buffer;
3667
3668
3669 if (!scsi_status && (resp_info & RESID_UNDER)) {
3670
3671 if (vport->cfg_log_verbose & LOG_FCP)
3672 logit = LOG_FCP_ERROR;
3673
3674 if (vport->cfg_log_verbose & LOG_FCP_UNDER)
3675 logit = LOG_FCP_UNDER;
3676 }
3677
3678 lpfc_printf_vlog(vport, KERN_WARNING, logit,
3679 "9024 FCP command x%x failed: x%x SNS x%x x%x "
3680 "Data: x%x x%x x%x x%x x%x\n",
3681 cmnd->cmnd[0], scsi_status,
3682 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
3683 be32_to_cpu(fcprsp->rspResId),
3684 be32_to_cpu(fcprsp->rspSnsLen),
3685 be32_to_cpu(fcprsp->rspRspLen),
3686 fcprsp->rspInfo3);
3687
3688 scsi_set_resid(cmnd, 0);
3689 fcpDl = be32_to_cpu(fcpcmd->fcpDl);
3690 if (resp_info & RESID_UNDER) {
3691 scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));
3692
3693 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_UNDER,
3694 "9025 FCP Underrun, expected %d, "
3695 "residual %d Data: x%x x%x x%x\n",
3696 fcpDl,
3697 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
3698 cmnd->underflow);
3699
3700
3701
3702
3703
3704
3705 if (fcpi_parm && (scsi_get_resid(cmnd) != fcpi_parm)) {
3706 lpfc_printf_vlog(vport, KERN_WARNING,
3707 LOG_FCP | LOG_FCP_ERROR,
3708 "9026 FCP Read Check Error "
3709 "and Underrun Data: x%x x%x x%x x%x\n",
3710 fcpDl,
3711 scsi_get_resid(cmnd), fcpi_parm,
3712 cmnd->cmnd[0]);
3713 scsi_set_resid(cmnd, scsi_bufflen(cmnd));
3714 host_status = DID_ERROR;
3715 }
3716
3717
3718
3719
3720
3721
3722 if (!(resp_info & SNS_LEN_VALID) &&
3723 (scsi_status == SAM_STAT_GOOD) &&
3724 (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
3725 < cmnd->underflow)) {
3726 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
3727 "9027 FCP command x%x residual "
3728 "underrun converted to error "
3729 "Data: x%x x%x x%x\n",
3730 cmnd->cmnd[0], scsi_bufflen(cmnd),
3731 scsi_get_resid(cmnd), cmnd->underflow);
3732 host_status = DID_ERROR;
3733 }
3734 } else if (resp_info & RESID_OVER) {
3735 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
3736 "9028 FCP command x%x residual overrun error. "
3737 "Data: x%x x%x\n", cmnd->cmnd[0],
3738 scsi_bufflen(cmnd), scsi_get_resid(cmnd));
3739 host_status = DID_ERROR;
3740
3741
3742
3743
3744
3745 } else if (fcpi_parm) {
3746 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
3747 "9029 FCP %s Check Error xri x%x Data: "
3748 "x%x x%x x%x x%x x%x\n",
3749 ((cmnd->sc_data_direction == DMA_FROM_DEVICE) ?
3750 "Read" : "Write"),
3751 ((phba->sli_rev == LPFC_SLI_REV4) ?
3752 lpfc_cmd->cur_iocbq.sli4_xritag :
3753 rsp_iocb->iocb.ulpContext),
3754 fcpDl, be32_to_cpu(fcprsp->rspResId),
3755 fcpi_parm, cmnd->cmnd[0], scsi_status);
3756
3757
3758
3759
3760
3761 if (fcpi_parm > fcpDl)
3762 goto out;
3763
3764 switch (scsi_status) {
3765 case SAM_STAT_GOOD:
3766 case SAM_STAT_CHECK_CONDITION:
3767
3768
3769
3770
3771
3772 host_status = DID_ERROR;
3773 break;
3774 }
3775 scsi_set_resid(cmnd, scsi_bufflen(cmnd));
3776 }
3777
3778 out:
3779 cmnd->result = host_status << 16 | scsi_status;
3780 lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb);
3781 }
3782
3783
3784
3785
3786
3787
3788
3789
3790
3791
3792
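/**
 * lpfc_scsi_cmd_iocb_cmpl - SCSI command completion handler
 * @phba: The hba for which this call is being executed.
 * @pIocbIn: The command IOCBQ for the scsi cmnd.
 * @pIocbOut: The response IOCBQ for the scsi cmnd.
 *
 * Translates the IOCB completion status into a SCSI midlayer result,
 * updates statistics and per-target queue depth tracking, unmaps the
 * DMA buffers, calls scsi_done() and releases the scsi buffer.
 **/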
3793 static void
3794 lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
3795 struct lpfc_iocbq *pIocbOut)
3796 {
3797 struct lpfc_io_buf *lpfc_cmd =
3798 (struct lpfc_io_buf *) pIocbIn->context1;
3799 struct lpfc_vport *vport = pIocbIn->vport;
3800 struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
3801 struct lpfc_nodelist *pnode = rdata->pnode;
3802 struct scsi_cmnd *cmd;
3803 unsigned long flags;
3804 struct lpfc_fast_path_event *fast_path_evt;
3805 struct Scsi_Host *shost;
3806 int idx;
3807 uint32_t logit = LOG_FCP;
3808 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
3809 int cpu;
3810 #endif
3811
3812
3813 spin_lock(&lpfc_cmd->buf_lock);
3814
3815
3816 cmd = lpfc_cmd->pCmd;
3817 if (!cmd) {
3818 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3819 "2621 IO completion: Not an active IO\n");
3820 spin_unlock(&lpfc_cmd->buf_lock);
3821 return;
3822 }
3823
3824 idx = lpfc_cmd->cur_iocbq.hba_wqidx;
3825 if (phba->sli4_hba.hdwq)
3826 phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++;
3827
3828 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
3829 if (phba->cpucheck_on & LPFC_CHECK_SCSI_IO) {
3830 cpu = raw_smp_processor_id();
3831 if (cpu < LPFC_CHECK_CPU_CNT && phba->sli4_hba.hdwq)
3832 phba->sli4_hba.hdwq[idx].cpucheck_cmpl_io[cpu]++;
3833 }
3834 #endif
3835 shost = cmd->device->host;
3836
3837 lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK);
3838 lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
3839
3840 if (pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY)
3841 lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
3842 else
3843 lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
3844
3845 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
3846 if (lpfc_cmd->prot_data_type) {
3847 struct scsi_dif_tuple *src = NULL;
3848
3849 src = (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment;
3850
3851
3852
3853
3854 switch (lpfc_cmd->prot_data_type) {
3855 case LPFC_INJERR_REFTAG:
3856 src->ref_tag =
3857 lpfc_cmd->prot_data;
3858 break;
3859 case LPFC_INJERR_APPTAG:
3860 src->app_tag =
3861 (uint16_t)lpfc_cmd->prot_data;
3862 break;
3863 case LPFC_INJERR_GUARD:
3864 src->guard_tag =
3865 (uint16_t)lpfc_cmd->prot_data;
3866 break;
3867 default:
3868 break;
3869 }
3870
3871 lpfc_cmd->prot_data = 0;
3872 lpfc_cmd->prot_data_type = 0;
3873 lpfc_cmd->prot_data_segment = NULL;
3874 }
3875 #endif
3876
3877 if (lpfc_cmd->status) {
3878 if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
3879 (lpfc_cmd->result & IOERR_DRVR_MASK))
3880 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
3881 else if (lpfc_cmd->status >= IOSTAT_CNT)
3882 lpfc_cmd->status = IOSTAT_DEFAULT;
3883 if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR &&
3884 !lpfc_cmd->fcp_rsp->rspStatus3 &&
3885 (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) &&
3886 !(vport->cfg_log_verbose & LOG_FCP_UNDER))
3887 logit = 0;
3888 else
3889 logit = LOG_FCP | LOG_FCP_UNDER;
3890 lpfc_printf_vlog(vport, KERN_WARNING, logit,
3891 "9030 FCP cmd x%x failed <%d/%lld> "
3892 "status: x%x result: x%x "
3893 "sid: x%x did: x%x oxid: x%x "
3894 "Data: x%x x%x\n",
3895 cmd->cmnd[0],
3896 cmd->device ? cmd->device->id : 0xffff,
3897 cmd->device ? cmd->device->lun : 0xffff,
3898 lpfc_cmd->status, lpfc_cmd->result,
3899 vport->fc_myDID,
3900 (pnode) ? pnode->nlp_DID : 0,
3901 phba->sli_rev == LPFC_SLI_REV4 ?
3902 lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
3903 pIocbOut->iocb.ulpContext,
3904 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
3905
3906 switch (lpfc_cmd->status) {
3907 case IOSTAT_FCP_RSP_ERROR:
3908
3909 lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut);
3910 break;
3911 case IOSTAT_NPORT_BSY:
3912 case IOSTAT_FABRIC_BSY:
3913 cmd->result = DID_TRANSPORT_DISRUPTED << 16;
3914 fast_path_evt = lpfc_alloc_fast_evt(phba);
3915 if (!fast_path_evt)
3916 break;
3917 fast_path_evt->un.fabric_evt.event_type =
3918 FC_REG_FABRIC_EVENT;
3919 fast_path_evt->un.fabric_evt.subcategory =
3920 (lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
3921 LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
3922 if (pnode && NLP_CHK_NODE_ACT(pnode)) {
3923 memcpy(&fast_path_evt->un.fabric_evt.wwpn,
3924 &pnode->nlp_portname,
3925 sizeof(struct lpfc_name));
3926 memcpy(&fast_path_evt->un.fabric_evt.wwnn,
3927 &pnode->nlp_nodename,
3928 sizeof(struct lpfc_name));
3929 }
3930 fast_path_evt->vport = vport;
3931 fast_path_evt->work_evt.evt =
3932 LPFC_EVT_FASTPATH_MGMT_EVT;
3933 spin_lock_irqsave(&phba->hbalock, flags);
3934 list_add_tail(&fast_path_evt->work_evt.evt_listp,
3935 &phba->work_list);
3936 spin_unlock_irqrestore(&phba->hbalock, flags);
3937 lpfc_worker_wake_up(phba);
3938 break;
3939 case IOSTAT_LOCAL_REJECT:
3940 case IOSTAT_REMOTE_STOP:
3941 if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR ||
3942 lpfc_cmd->result ==
3943 IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR ||
3944 lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR ||
3945 lpfc_cmd->result ==
3946 IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) {
3947 cmd->result = DID_NO_CONNECT << 16;
3948 break;
3949 }
3950 if (lpfc_cmd->result == IOERR_INVALID_RPI ||
3951 lpfc_cmd->result == IOERR_NO_RESOURCES ||
3952 lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
3953 lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
3954 cmd->result = DID_REQUEUE << 16;
3955 break;
3956 }
3957 if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
3958 lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
3959 pIocbOut->iocb.unsli3.sli3_bg.bgstat) {
3960 if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
3961
3962
3963
3964
3965 lpfc_parse_bg_err(phba, lpfc_cmd,
3966 pIocbOut);
3967 break;
3968 } else {
3969 lpfc_printf_vlog(vport, KERN_WARNING,
3970 LOG_BG,
3971 "9031 non-zero BGSTAT "
3972 "on unprotected cmd\n");
3973 }
3974 }
3975 if ((lpfc_cmd->status == IOSTAT_REMOTE_STOP)
3976 && (phba->sli_rev == LPFC_SLI_REV4)
3977 && (pnode && NLP_CHK_NODE_ACT(pnode))) {
3978
3979
3980
3981
3982 lpfc_set_rrq_active(phba, pnode,
3983 lpfc_cmd->cur_iocbq.sli4_lxritag,
3984 0, 0);
3985 }
3986
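/* A LOCAL_REJECT/REMOTE_STOP not handled above falls through
 * to the default DID_ERROR disposition.
 */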
3987 default:
3988 cmd->result = DID_ERROR << 16;
3989 break;
3990 }
3991
3992 if (!pnode || !NLP_CHK_NODE_ACT(pnode)
3993 || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
3994 cmd->result = DID_TRANSPORT_DISRUPTED << 16 |
3995 SAM_STAT_BUSY;
3996 } else
3997 cmd->result = DID_OK << 16;
3998
3999 if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
4000 uint32_t *lp = (uint32_t *)cmd->sense_buffer;
4001
4002 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4003 "0710 Iodone <%d/%llu> cmd x%px, error "
4004 "x%x SNS x%x x%x Data: x%x x%x\n",
4005 cmd->device->id, cmd->device->lun, cmd,
4006 cmd->result, *lp, *(lp + 3), cmd->retries,
4007 scsi_get_resid(cmd));
4008 }
4009
4010 lpfc_update_stats(phba, lpfc_cmd);
4011 if (vport->cfg_max_scsicmpl_time &&
4012 time_after(jiffies, lpfc_cmd->start_time +
4013 msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
4014 spin_lock_irqsave(shost->host_lock, flags);
4015 if (pnode && NLP_CHK_NODE_ACT(pnode)) {
4016 if (pnode->cmd_qdepth >
4017 atomic_read(&pnode->cmd_pending) &&
4018 (atomic_read(&pnode->cmd_pending) >
4019 LPFC_MIN_TGT_QDEPTH) &&
4020 ((cmd->cmnd[0] == READ_10) ||
4021 (cmd->cmnd[0] == WRITE_10)))
4022 pnode->cmd_qdepth =
4023 atomic_read(&pnode->cmd_pending);
4024
4025 pnode->last_change_time = jiffies;
4026 }
4027 spin_unlock_irqrestore(shost->host_lock, flags);
4028 }
4029 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
4030
4031 lpfc_cmd->pCmd = NULL;
4032 spin_unlock(&lpfc_cmd->buf_lock);
4033
4034
4035 cmd->scsi_done(cmd);
4036
4037
4038
4039
4040
4041 spin_lock(&lpfc_cmd->buf_lock);
4042 lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_DRIVER_ABORTED;
4043 if (lpfc_cmd->waitq)
4044 wake_up(lpfc_cmd->waitq);
4045 spin_unlock(&lpfc_cmd->buf_lock);
4046
4047 lpfc_release_scsi_buf(phba, lpfc_cmd);
4048 }
4049
4050
4051
4052
4053
4054
4055
4056
4057
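/**
 * lpfc_fcpcmd_to_iocb - Copy the fcp_cmnd into the IOCB
 * @data: destination area inside the IOCB (SLI-3 fcp_ext.icd).
 * @fcp_cmnd: The FCP command to copy.
 *
 * Copies the FCP command word by word, converting each word to
 * big-endian wire order on the way.
 **/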
4058 static void
4059 lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
4060 {
4061 int i, j;
4062 for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
4063 i += sizeof(uint32_t), j++) {
4064 ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
4065 }
4066 }
4067
4068
4069
4070
4071
4072
4073
4074
4075
4076
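/**
 * lpfc_scsi_prep_cmnd - Build an FCP command from a SCSI command
 * @vport: The virtual port for which this call is being executed.
 * @lpfc_cmd: The scsi command which needs to be sent.
 * @pnode: Pointer to the lpfc_nodelist of the target.
 *
 * Fills in the FCP_CMND (LUN, CDB, task attribute, read/write
 * control), picks the read/write/control IOCB command, programs the
 * first-burst XFER_RDY length when negotiated, and initializes the
 * IOCB routing fields (RPI, class, FCP-2 recovery, timeout).
 **/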
4077 static void
4078 lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
4079 struct lpfc_nodelist *pnode)
4080 {
4081 struct lpfc_hba *phba = vport->phba;
4082 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
4083 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
4084 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
4085 struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
4086 struct lpfc_sli4_hdw_queue *hdwq = NULL;
4087 int datadir = scsi_cmnd->sc_data_direction;
4088 int idx;
4089 uint8_t *ptr;
4090 bool sli4;
4091 uint32_t fcpdl;
4092
4093 if (!pnode || !NLP_CHK_NODE_ACT(pnode))
4094 return;
4095
4096 lpfc_cmd->fcp_rsp->rspSnsLen = 0;
4097
4098 lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;
4099
4100 int_to_scsilun(lpfc_cmd->pCmd->device->lun,
4101 &lpfc_cmd->fcp_cmnd->fcp_lun);
4102
4103 ptr = &fcp_cmnd->fcpCdb[0];
4104 memcpy(ptr, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
4105 if (scsi_cmnd->cmd_len < LPFC_FCP_CDB_LEN) {
4106 ptr += scsi_cmnd->cmd_len;
4107 memset(ptr, 0, (LPFC_FCP_CDB_LEN - scsi_cmnd->cmd_len));
4108 }
4109
4110 fcp_cmnd->fcpCntl1 = SIMPLE_Q;
4111
4112 sli4 = (phba->sli_rev == LPFC_SLI_REV4);
4113 piocbq->iocb.un.fcpi.fcpi_XRdy = 0;
4114 idx = lpfc_cmd->hdwq_no;
4115 if (phba->sli4_hba.hdwq)
4116 hdwq = &phba->sli4_hba.hdwq[idx];
4117
4118
4119
4120
4121
4122
4123
4124 if (scsi_sg_count(scsi_cmnd)) {
4125 if (datadir == DMA_TO_DEVICE) {
4126 iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
4127 iocb_cmd->ulpPU = PARM_READ_CHECK;
4128 if (vport->cfg_first_burst_size &&
4129 (pnode->nlp_flag & NLP_FIRSTBURST)) {
4130 fcpdl = scsi_bufflen(scsi_cmnd);
4131 if (fcpdl < vport->cfg_first_burst_size)
4132 piocbq->iocb.un.fcpi.fcpi_XRdy = fcpdl;
4133 else
4134 piocbq->iocb.un.fcpi.fcpi_XRdy =
4135 vport->cfg_first_burst_size;
4136 }
4137 fcp_cmnd->fcpCntl3 = WRITE_DATA;
4138 if (hdwq)
4139 hdwq->scsi_cstat.output_requests++;
4140 } else {
4141 iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
4142 iocb_cmd->ulpPU = PARM_READ_CHECK;
4143 fcp_cmnd->fcpCntl3 = READ_DATA;
4144 if (hdwq)
4145 hdwq->scsi_cstat.input_requests++;
4146 }
4147 } else {
4148 iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
4149 iocb_cmd->un.fcpi.fcpi_parm = 0;
4150 iocb_cmd->ulpPU = 0;
4151 fcp_cmnd->fcpCntl3 = 0;
4152 if (hdwq)
4153 hdwq->scsi_cstat.control_requests++;
4154 }
4155 if (phba->sli_rev == 3 &&
4156 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED))
4157 lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
4158
4159
4160
4161
4162 piocbq->iocb.ulpContext = pnode->nlp_rpi;
4163 if (sli4)
4164 piocbq->iocb.ulpContext =
4165 phba->sli4_hba.rpi_ids[pnode->nlp_rpi];
4166 if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
4167 piocbq->iocb.ulpFCP2Rcvy = 1;
4168 else
4169 piocbq->iocb.ulpFCP2Rcvy = 0;
4170
4171 piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
4172 piocbq->context1 = lpfc_cmd;
4173 piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
4174 piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
4175 piocbq->vport = vport;
4176 }
4177
4178
4179
4180
4181
4182
4183
4184
4185
4186
4187
4188
4189
4190
4191
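/**
 * lpfc_scsi_prep_task_mgmt_cmd - Build an FCP task management command
 * @vport: The virtual port for which this call is being executed.
 * @lpfc_cmd: Pointer to lpfc_io_buf data structure.
 * @lun: The LUN the task management command is targeting.
 * @task_mgmt_cmd: The task management command code (fcpCntl2).
 *
 * Return codes:
 *   0 - the target node is unusable
 *   1 - the command was prepared successfully
 **/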
4192 static int
4193 lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
4194 struct lpfc_io_buf *lpfc_cmd,
4195 uint64_t lun,
4196 uint8_t task_mgmt_cmd)
4197 {
4198 struct lpfc_iocbq *piocbq;
4199 IOCB_t *piocb;
4200 struct fcp_cmnd *fcp_cmnd;
4201 struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
4202 struct lpfc_nodelist *ndlp = rdata->pnode;
4203
4204 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
4205 ndlp->nlp_state != NLP_STE_MAPPED_NODE)
4206 return 0;
4207
4208 piocbq = &(lpfc_cmd->cur_iocbq);
4209 piocbq->vport = vport;
4210
4211 piocb = &piocbq->iocb;
4212
4213 fcp_cmnd = lpfc_cmd->fcp_cmnd;
4214
4215 memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
4216 int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
4217 fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
4218 if (vport->phba->sli_rev == 3 &&
4219 !(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED))
4220 lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
4221 piocb->ulpCommand = CMD_FCP_ICMND64_CR;
4222 piocb->ulpContext = ndlp->nlp_rpi;
4223 if (vport->phba->sli_rev == LPFC_SLI_REV4) {
4224 piocb->ulpContext =
4225 vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
4226 }
4227 piocb->ulpFCP2Rcvy = (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0;
4228 piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
4229 piocb->ulpPU = 0;
4230 piocb->un.fcpi.fcpi_parm = 0;
4231
4232
4233 if (lpfc_cmd->timeout > 0xff) {
4234
4235
4236
4237
4238 piocb->ulpTimeout = 0;
4239 } else
4240 piocb->ulpTimeout = lpfc_cmd->timeout;
4241
4242 if (vport->phba->sli_rev == LPFC_SLI_REV4)
4243 lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd);
4244
4245 return 1;
4246 }
4247
4248
4249
4250
4251
4252
4253
4254
4255
4256
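/**
 * lpfc_scsi_api_table_setup - Set up the SCSI API function jump table
 * @phba: The hba struct for which this call is being executed.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * Installs the SLI-3 or SLI-4 variants of the buffer get/release and
 * DMA preparation routines.
 *
 * Returns 0 on success, -ENODEV for an unrecognized device group.
 **/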
4257 int
4258 lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
4259 {
4260
4261 phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf;
4262 phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd;
4263
4264 switch (dev_grp) {
4265 case LPFC_PCI_DEV_LP:
4266 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
4267 phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s3;
4268 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
4269 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s3;
4270 break;
4271 case LPFC_PCI_DEV_OC:
4272 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
4273 phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s4;
4274 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
4275 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s4;
4276 break;
4277 default:
4278 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4279 "1418 Invalid HBA PCI-device group: 0x%x\n",
4280 dev_grp);
4281 return -ENODEV;
4282 break;
4283 }
4284 phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
4285 phba->lpfc_scsi_cmd_iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
4286 return 0;
4287 }
4288
4289
4290
4291
4292
4293
4294
4295
4296
4297
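/**
 * lpfc_tskmgmt_def_cmpl - Default completion for task management IOCBs
 * @phba: The Hba for which this call is being executed.
 * @cmdiocbq: Pointer to the command lpfc_iocbq data structure.
 * @rspiocbq: Pointer to the response lpfc_iocbq data structure.
 *
 * Simply releases the scsi buffer associated with the command.
 **/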
4298 static void
4299 lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
4300 struct lpfc_iocbq *cmdiocbq,
4301 struct lpfc_iocbq *rspiocbq)
4302 {
4303 struct lpfc_io_buf *lpfc_cmd =
4304 (struct lpfc_io_buf *) cmdiocbq->context1;
4305 if (lpfc_cmd)
4306 lpfc_release_scsi_buf(phba, lpfc_cmd);
4307 return;
4308 }
4309
4310
4311
4312
4313
4314
4315
4316
4317
4318
4319
4320
4321
4322
4323
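/**
 * lpfc_check_pci_resettable - Check whether a PCI slot reset is allowed
 * @phba: Pointer to HBA context object.
 *
 * Walks every function on the adapter's PCI bus and refuses the reset
 * if any of them is not a known Emulex Lancer FC device, or if more
 * than one device sits on the secondary bus.
 *
 * Returns 0 if the slot can safely be reset, -EBADSLT otherwise.
 **/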
4324 int
4325 lpfc_check_pci_resettable(const struct lpfc_hba *phba)
4326 {
4327 const struct pci_dev *pdev = phba->pcidev;
4328 struct pci_dev *ptr = NULL;
4329 u8 counter = 0;
4330
4331
4332 list_for_each_entry(ptr, &pdev->bus->devices, bus_list) {
4333
4334 if (ptr->vendor != PCI_VENDOR_ID_EMULEX) {
4335 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4336 "8346 Non-Emulex vendor found: "
4337 "0x%04x\n", ptr->vendor);
4338 return -EBADSLT;
4339 }
4340
4341
4342 switch (ptr->device) {
4343 case PCI_DEVICE_ID_LANCER_FC:
4344 case PCI_DEVICE_ID_LANCER_G6_FC:
4345 case PCI_DEVICE_ID_LANCER_G7_FC:
4346 break;
4347 default:
4348 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4349 "8347 Invalid device found: "
4350 "0x%04x\n", ptr->device);
4351 return -EBADSLT;
4352 }
4353
4354
4355
4356
4357 if (ptr->devfn == 0) {
4358 if (++counter > 1) {
4359 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4360 "8348 More than one device on "
4361 "secondary bus found\n");
4362 return -EBADSLT;
4363 }
4364 }
4365 }
4366
4367 return 0;
4368 }
4369
4370
4371
4372
4373
4374
4375
4376
4377
4378
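/**
 * lpfc_info - Info entry point of the scsi_host_template
 * @host: The scsi host for which this call is being executed.
 *
 * Formats the adapter model, PCI location, port name, logical link
 * speed and PCI-resettable status into a static buffer.
 *
 * Return value: pointer to the formatted info string.
 **/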
4379 const char *
4380 lpfc_info(struct Scsi_Host *host)
4381 {
4382 struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
4383 struct lpfc_hba *phba = vport->phba;
4384 int link_speed = 0;
4385 static char lpfcinfobuf[384];
4386 char tmp[384] = {0};
4387
4388 memset(lpfcinfobuf, 0, sizeof(lpfcinfobuf));
4389 if (phba && phba->pcidev){
4390
4391 scnprintf(tmp, sizeof(tmp), phba->ModelDesc);
4392 if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
4393 sizeof(lpfcinfobuf))
4394 goto buffer_done;
4395
4396
4397 scnprintf(tmp, sizeof(tmp),
4398 " on PCI bus %02x device %02x irq %d",
4399 phba->pcidev->bus->number, phba->pcidev->devfn,
4400 phba->pcidev->irq);
4401 if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
4402 sizeof(lpfcinfobuf))
4403 goto buffer_done;
4404
4405
4406 if (phba->Port[0]) {
4407 scnprintf(tmp, sizeof(tmp), " port %s", phba->Port);
4408 if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
4409 sizeof(lpfcinfobuf))
4410 goto buffer_done;
4411 }
4412
4413
4414 link_speed = lpfc_sli_port_speed_get(phba);
4415 if (link_speed != 0) {
4416 scnprintf(tmp, sizeof(tmp),
4417 " Logical Link Speed: %d Mbps", link_speed);
4418 if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
4419 sizeof(lpfcinfobuf))
4420 goto buffer_done;
4421 }
4422
4423
4424 if (!lpfc_check_pci_resettable(phba)) {
4425 scnprintf(tmp, sizeof(tmp), " PCI resettable");
4426 strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf));
4427 }
4428 }
4429
4430 buffer_done:
4431 return lpfcinfobuf;
4432 }
4433
4434
4435
4436
4437
4438
4439
4440
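/*
 * lpfc_poll_rearm_timer - Re-arm the FCP ring poll timer if there are
 * still commands outstanding on the txcmplq.
 */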
4441 static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
4442 {
4443 unsigned long poll_tmo_expires =
4444 (jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));
4445
4446 if (!list_empty(&phba->sli.sli3_ring[LPFC_FCP_RING].txcmplq))
4447 mod_timer(&phba->fcp_poll_timer,
4448 poll_tmo_expires);
4449 }
4450
4451
4452
4453
4454
4455
4456
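/*
 * lpfc_poll_start_timer - Kick off FCP ring polling by arming the
 * poll timer.
 */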
4457 void lpfc_poll_start_timer(struct lpfc_hba * phba)
4458 {
4459 lpfc_poll_rearm_timer(phba);
4460 }
4461
4462
4463
4464
4465
4466
4467
4468
4469
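/**
 * lpfc_poll_timeout - Restart polling of the FCP ring
 * @t: The poll timer that expired.
 *
 * Services any pending fast-ring completions and re-arms the timer
 * when ring interrupts are disabled.
 **/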
4470 void lpfc_poll_timeout(struct timer_list *t)
4471 {
4472 struct lpfc_hba *phba = from_timer(phba, t, fcp_poll_timer);
4473
4474 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
4475 lpfc_sli_handle_fast_ring_event(phba,
4476 &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
4477
4478 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
4479 lpfc_poll_rearm_timer(phba);
4480 }
4481 }
4482
4483
4484
4485
4486
4487
4488
4489
4490
4491
4492
4493
4494
4495
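/**
 * lpfc_queuecommand - queuecommand entry point of the scsi_host_template
 * @shost: The Scsi_Host the command was issued to.
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * Validates the rport and node state, allocates a scsi buffer, DMA
 * maps the command (BlockGuard aware) and issues the IOCB to the FCP
 * ring.
 *
 * Return codes:
 *   0 - success
 *   SCSI_MLQUEUE_HOST_BUSY / SCSI_MLQUEUE_TARGET_BUSY - retry later
 **/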
4496 static int
4497 lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
4498 {
4499 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4500 struct lpfc_hba *phba = vport->phba;
4501 struct lpfc_rport_data *rdata;
4502 struct lpfc_nodelist *ndlp;
4503 struct lpfc_io_buf *lpfc_cmd;
4504 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
4505 int err, idx;
4506 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4507 int cpu;
4508 #endif
4509
4510 rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
4511
4512
4513 if (unlikely(!rdata) || unlikely(!rport))
4514 goto out_fail_command;
4515
4516 err = fc_remote_port_chkready(rport);
4517 if (err) {
4518 cmnd->result = err;
4519 goto out_fail_command;
4520 }
4521 ndlp = rdata->pnode;
4522
4523 if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) &&
4524 (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED))) {
4525
4526 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4527 "9058 BLKGRD: ERROR: rcvd protected cmd:%02x"
4528 " op:%02x str=%s without registering for"
4529 " BlockGuard - Rejecting command\n",
4530 cmnd->cmnd[0], scsi_get_prot_op(cmnd),
4531 dif_op_str[scsi_get_prot_op(cmnd)]);
4532 goto out_fail_command;
4533 }
4534
4535
4536
4537
4538
4539 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
4540 goto out_tgt_busy;
4541 if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
4542 if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) {
4543 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR,
4544 "3377 Target Queue Full, scsi Id:%d "
4545 "Qdepth:%d Pending command:%d"
4546 " WWNN:%02x:%02x:%02x:%02x:"
4547 "%02x:%02x:%02x:%02x, "
4548 " WWPN:%02x:%02x:%02x:%02x:"
4549 "%02x:%02x:%02x:%02x",
4550 ndlp->nlp_sid, ndlp->cmd_qdepth,
4551 atomic_read(&ndlp->cmd_pending),
4552 ndlp->nlp_nodename.u.wwn[0],
4553 ndlp->nlp_nodename.u.wwn[1],
4554 ndlp->nlp_nodename.u.wwn[2],
4555 ndlp->nlp_nodename.u.wwn[3],
4556 ndlp->nlp_nodename.u.wwn[4],
4557 ndlp->nlp_nodename.u.wwn[5],
4558 ndlp->nlp_nodename.u.wwn[6],
4559 ndlp->nlp_nodename.u.wwn[7],
4560 ndlp->nlp_portname.u.wwn[0],
4561 ndlp->nlp_portname.u.wwn[1],
4562 ndlp->nlp_portname.u.wwn[2],
4563 ndlp->nlp_portname.u.wwn[3],
4564 ndlp->nlp_portname.u.wwn[4],
4565 ndlp->nlp_portname.u.wwn[5],
4566 ndlp->nlp_portname.u.wwn[6],
4567 ndlp->nlp_portname.u.wwn[7]);
4568 goto out_tgt_busy;
4569 }
4570 }
4571
4572 lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp, cmnd);
4573 if (lpfc_cmd == NULL) {
4574 lpfc_rampdown_queue_depth(phba);
4575
4576 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR,
4577 "0707 driver's buffer pool is empty, "
4578 "IO busied\n");
4579 goto out_host_busy;
4580 }
4581
4582
4583
4584
4585
4586 lpfc_cmd->pCmd = cmnd;
4587 lpfc_cmd->rdata = rdata;
4588 lpfc_cmd->ndlp = ndlp;
4589 cmnd->host_scribble = (unsigned char *)lpfc_cmd;
4590
4591 if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
4592 if (vport->phba->cfg_enable_bg) {
4593 lpfc_printf_vlog(vport,
4594 KERN_INFO, LOG_SCSI_CMD,
4595 "9033 BLKGRD: rcvd %s cmd:x%x "
4596 "sector x%llx cnt %u pt %x\n",
4597 dif_op_str[scsi_get_prot_op(cmnd)],
4598 cmnd->cmnd[0],
4599 (unsigned long long)scsi_get_lba(cmnd),
4600 blk_rq_sectors(cmnd->request),
4601 (cmnd->cmnd[1]>>5));
4602 }
4603 err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
4604 } else {
4605 if (vport->phba->cfg_enable_bg) {
4606 lpfc_printf_vlog(vport,
4607 KERN_INFO, LOG_SCSI_CMD,
4608 "9038 BLKGRD: rcvd PROT_NORMAL cmd: "
4609 "x%x sector x%llx cnt %u pt %x\n",
4610 cmnd->cmnd[0],
4611 (unsigned long long)scsi_get_lba(cmnd),
4612 blk_rq_sectors(cmnd->request),
4613 (cmnd->cmnd[1]>>5));
4614 }
4615 err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
4616 }
4617
4618 if (err == 2) {
4619 cmnd->result = DID_ERROR << 16;
4620 goto out_fail_command_release_buf;
4621 } else if (err) {
4622 goto out_host_busy_free_buf;
4623 }
4624
4625 lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
4626
4627 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4628 if (phba->cpucheck_on & LPFC_CHECK_SCSI_IO) {
4629 cpu = raw_smp_processor_id();
4630 if (cpu < LPFC_CHECK_CPU_CNT) {
4631 struct lpfc_sli4_hdw_queue *hdwq =
4632 &phba->sli4_hba.hdwq[lpfc_cmd->hdwq_no];
4633 hdwq->cpucheck_xmt_io[cpu]++;
4634 }
4635 }
4636 #endif
4637 err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
4638 &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
4639 if (err) {
4640 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4641 "3376 FCP could not issue IOCB err %x"
4642 "FCP cmd x%x <%d/%llu> "
4643 "sid: x%x did: x%x oxid: x%x "
4644 "Data: x%x x%x x%x x%x\n",
4645 err, cmnd->cmnd[0],
4646 cmnd->device ? cmnd->device->id : 0xffff,
4647 cmnd->device ? cmnd->device->lun : (u64) -1,
4648 vport->fc_myDID, ndlp->nlp_DID,
4649 phba->sli_rev == LPFC_SLI_REV4 ?
4650 lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
4651 lpfc_cmd->cur_iocbq.iocb.ulpContext,
4652 lpfc_cmd->cur_iocbq.iocb.ulpIoTag,
4653 lpfc_cmd->cur_iocbq.iocb.ulpTimeout,
4654 (uint32_t)
4655 (cmnd->request->timeout / 1000));
4656
4657 goto out_host_busy_free_buf;
4658 }
4659 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
4660 lpfc_sli_handle_fast_ring_event(phba,
4661 &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
4662
4663 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
4664 lpfc_poll_rearm_timer(phba);
4665 }
4666
4667 if (phba->cfg_xri_rebalancing)
4668 lpfc_keep_pvt_pool_above_lowwm(phba, lpfc_cmd->hdwq_no);
4669
4670 return 0;
4671
4672 out_host_busy_free_buf:
4673 idx = lpfc_cmd->hdwq_no;
4674 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
4675 if (phba->sli4_hba.hdwq) {
4676 switch (lpfc_cmd->fcp_cmnd->fcpCntl3) {
4677 case WRITE_DATA:
4678 phba->sli4_hba.hdwq[idx].scsi_cstat.output_requests--;
4679 break;
4680 case READ_DATA:
4681 phba->sli4_hba.hdwq[idx].scsi_cstat.input_requests--;
4682 break;
4683 default:
4684 phba->sli4_hba.hdwq[idx].scsi_cstat.control_requests--;
4685 }
4686 }
4687 lpfc_release_scsi_buf(phba, lpfc_cmd);
4688 out_host_busy:
4689 return SCSI_MLQUEUE_HOST_BUSY;
4690
4691 out_tgt_busy:
4692 return SCSI_MLQUEUE_TARGET_BUSY;
4693
4694 out_fail_command_release_buf:
4695 lpfc_release_scsi_buf(phba, lpfc_cmd);
4696
4697 out_fail_command:
4698 cmnd->scsi_done(cmnd);
4699 return 0;
4700 }
4701
4702
4703
4704
4705
4706
4707
4708
4709
4710
4711
4712
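/**
 * lpfc_abort_handler - eh_abort_handler entry point of the scsi_host_template
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * Issues an ABORT_XRI_CN (or CLOSE_XRI_CN when the link is down) for
 * the outstanding IOCB and waits up to twice the devloss timeout for
 * the aborted command to complete.
 *
 * Return codes:
 *   SUCCESS - the command completed or was successfully aborted
 *   FAILED  - the abort could not be issued or timed out
 **/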
4713 static int
4714 lpfc_abort_handler(struct scsi_cmnd *cmnd)
4715 {
4716 struct Scsi_Host *shost = cmnd->device->host;
4717 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4718 struct lpfc_hba *phba = vport->phba;
4719 struct lpfc_iocbq *iocb;
4720 struct lpfc_iocbq *abtsiocb;
4721 struct lpfc_io_buf *lpfc_cmd;
4722 IOCB_t *cmd, *icmd;
4723 int ret = SUCCESS, status = 0;
4724 struct lpfc_sli_ring *pring_s4 = NULL;
4725 int ret_val;
4726 unsigned long flags;
4727 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
4728
4729 status = fc_block_scsi_eh(cmnd);
4730 if (status != 0 && status != SUCCESS)
4731 return status;
4732
4733 lpfc_cmd = (struct lpfc_io_buf *)cmnd->host_scribble;
4734 if (!lpfc_cmd)
4735 return ret;
4736
4737 spin_lock_irqsave(&phba->hbalock, flags);
4738
4739 if (phba->hba_flag & HBA_IOQ_FLUSH) {
4740 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4741 "3168 SCSI Layer abort requested I/O has been "
4742 "flushed by LLD.\n");
4743 ret = FAILED;
4744 goto out_unlock;
4745 }
4746
4747
4748 spin_lock(&lpfc_cmd->buf_lock);
4749
4750 if (!lpfc_cmd->pCmd) {
4751 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4752 "2873 SCSI Layer I/O Abort Request IO CMPL Status "
4753 "x%x ID %d LUN %llu\n",
4754 SUCCESS, cmnd->device->id, cmnd->device->lun);
4755 goto out_unlock_buf;
4756 }
4757
4758 iocb = &lpfc_cmd->cur_iocbq;
4759 if (phba->sli_rev == LPFC_SLI_REV4) {
4760 pring_s4 = phba->sli4_hba.hdwq[iocb->hba_wqidx].io_wq->pring;
4761 if (!pring_s4) {
4762 ret = FAILED;
4763 goto out_unlock_buf;
4764 }
4765 spin_lock(&pring_s4->ring_lock);
4766 }
4767
4768 if (!(iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
4769 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4770 "3169 SCSI Layer abort requested I/O has been "
4771 "cancelled by LLD.\n");
4772 ret = FAILED;
4773 goto out_unlock_ring;
4774 }
4775
4776
4777
4778
4779
4780
4781 if (lpfc_cmd->pCmd != cmnd) {
4782 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4783 "3170 SCSI Layer abort requested I/O has been "
4784 "completed by LLD.\n");
4785 goto out_unlock_ring;
4786 }
4787
4788 BUG_ON(iocb->context1 != lpfc_cmd);
4789
/* An abort issued during error recovery is still in progress */
4791 if (iocb->iocb_flag & LPFC_DRIVER_ABORTED) {
4792 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4793 "3389 SCSI Layer I/O Abort Request is pending\n");
4794 if (phba->sli_rev == LPFC_SLI_REV4)
4795 spin_unlock(&pring_s4->ring_lock);
4796 spin_unlock(&lpfc_cmd->buf_lock);
4797 spin_unlock_irqrestore(&phba->hbalock, flags);
4798 goto wait_for_cmpl;
4799 }
4800
4801 abtsiocb = __lpfc_sli_get_iocbq(phba);
4802 if (abtsiocb == NULL) {
4803 ret = FAILED;
4804 goto out_unlock_ring;
4805 }
4806
/* Indicate the I/O is being aborted by the driver */
4808 iocb->iocb_flag |= LPFC_DRIVER_ABORTED;
4809
/*
 * The SCSI command is in flight: pCmd still points at the command to
 * abort, so it cannot be sitting on the txq. There is no need to
 * search the txcmplq; just send an abort to the firmware.
 */
4816 cmd = &iocb->iocb;
4817 icmd = &abtsiocb->iocb;
4818 icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
4819 icmd->un.acxri.abortContextTag = cmd->ulpContext;
4820 if (phba->sli_rev == LPFC_SLI_REV4)
4821 icmd->un.acxri.abortIoTag = iocb->sli4_xritag;
4822 else
4823 icmd->un.acxri.abortIoTag = cmd->ulpIoTag;
4824
4825 icmd->ulpLe = 1;
4826 icmd->ulpClass = cmd->ulpClass;
4827
/* The ABTS WQE must go to the same WQ as the WQE being aborted */
4829 abtsiocb->hba_wqidx = iocb->hba_wqidx;
4830 abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
4831 if (iocb->iocb_flag & LPFC_IO_FOF)
4832 abtsiocb->iocb_flag |= LPFC_IO_FOF;
4833
4834 if (lpfc_is_link_up(phba))
4835 icmd->ulpCommand = CMD_ABORT_XRI_CN;
4836 else
4837 icmd->ulpCommand = CMD_CLOSE_XRI_CN;
4838
4839 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
4840 abtsiocb->vport = vport;
4841 lpfc_cmd->waitq = &waitq;
4842 if (phba->sli_rev == LPFC_SLI_REV4) {
/* Note: both hbalock and ring_lock must be held here */
4844 ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
4845 abtsiocb, 0);
4846 spin_unlock(&pring_s4->ring_lock);
4847 } else {
4848 ret_val = __lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
4849 abtsiocb, 0);
4850 }
4851
4852 if (ret_val == IOCB_ERROR) {
/* Indicate the I/O is not being aborted by the driver */
4854 iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
4855 lpfc_cmd->waitq = NULL;
4856 spin_unlock(&lpfc_cmd->buf_lock);
4857 spin_unlock_irqrestore(&phba->hbalock, flags);
4858 lpfc_sli_release_iocbq(phba, abtsiocb);
4859 ret = FAILED;
4860 goto out;
4861 }
4862
/* The locks are no longer needed past this point */
4864 spin_unlock(&lpfc_cmd->buf_lock);
4865 spin_unlock_irqrestore(&phba->hbalock, flags);
4866
4867 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
4868 lpfc_sli_handle_fast_ring_event(phba,
4869 &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
4870
4871 wait_for_cmpl:
/* Wait for the aborted I/O to complete, up to 2 * devloss_tmo */
4873 wait_event_timeout(waitq,
4874 (lpfc_cmd->pCmd != cmnd),
4875 msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000));
4876
4877 spin_lock(&lpfc_cmd->buf_lock);
4878
4879 if (lpfc_cmd->pCmd == cmnd) {
4880 ret = FAILED;
4881 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
4882 "0748 abort handler timed out waiting "
4883 "for aborting I/O (xri:x%x) to complete: "
4884 "ret %#x, ID %d, LUN %llu\n",
4885 iocb->sli4_xritag, ret,
4886 cmnd->device->id, cmnd->device->lun);
4887 }
4888
4889 lpfc_cmd->waitq = NULL;
4890
4891 spin_unlock(&lpfc_cmd->buf_lock);
4892 goto out;
4893
4894 out_unlock_ring:
4895 if (phba->sli_rev == LPFC_SLI_REV4)
4896 spin_unlock(&pring_s4->ring_lock);
4897 out_unlock_buf:
4898 spin_unlock(&lpfc_cmd->buf_lock);
4899 out_unlock:
4900 spin_unlock_irqrestore(&phba->hbalock, flags);
4901 out:
4902 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4903 "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
4904 "LUN %llu\n", ret, cmnd->device->id,
4905 cmnd->device->lun);
4906 return ret;
4907 }
4908
4909 static char *
4910 lpfc_taskmgmt_name(uint8_t task_mgmt_cmd)
4911 {
4912 switch (task_mgmt_cmd) {
4913 case FCP_ABORT_TASK_SET:
4914 return "ABORT_TASK_SET";
4915 case FCP_CLEAR_TASK_SET:
4916 return "FCP_CLEAR_TASK_SET";
4917 case FCP_BUS_RESET:
4918 return "FCP_BUS_RESET";
4919 case FCP_LUN_RESET:
4920 return "FCP_LUN_RESET";
4921 case FCP_TARGET_RESET:
4922 return "FCP_TARGET_RESET";
4923 case FCP_CLEAR_ACA:
4924 return "FCP_CLEAR_ACA";
4925 case FCP_TERMINATE_TASK:
4926 return "FCP_TERMINATE_TASK";
4927 default:
4928 return "unknown";
4929 }
4930 }
4931
/**
 * lpfc_check_fcp_rsp - check the returned fcp_rsp to see if the task failed
 * @vport: The virtual port for which this call is being executed.
 * @lpfc_cmd: Pointer to lpfc_io_buf data structure.
 *
 * This routine examines the FCP_RSP of a completed task management
 * command for a valid response code.
 *
 * Return code:
 *   SUCCESS - the target reported RSP_NO_FAILURE
 *   FAILED  - the response was missing, invalid, or reported a failure
 **/
4944 static int
4945 lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd)
4946 {
4947 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
4948 uint32_t rsp_info;
4949 uint32_t rsp_len;
4950 uint8_t rsp_info_code;
4951 int ret = FAILED;
4952
4953
4954 if (fcprsp == NULL)
4955 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4956 "0703 fcp_rsp is missing\n");
4957 else {
4958 rsp_info = fcprsp->rspStatus2;
4959 rsp_len = be32_to_cpu(fcprsp->rspRspLen);
4960 rsp_info_code = fcprsp->rspInfo3;
4961
4962
4963 lpfc_printf_vlog(vport, KERN_INFO,
4964 LOG_FCP,
4965 "0706 fcp_rsp valid 0x%x,"
4966 " rsp len=%d code 0x%x\n",
4967 rsp_info,
4968 rsp_len, rsp_info_code);
4969
/*
 * Per the FCP spec, FCP_RSP_INFO (and hence the RSP_CODE in rspInfo3)
 * is only meaningful when RSP_LEN_VALID is set and the response length
 * is 4 or 8 bytes.
 */
4974 if ((fcprsp->rspStatus2 & RSP_LEN_VALID) &&
4975 ((rsp_len == 8) || (rsp_len == 4))) {
4976 switch (rsp_info_code) {
4977 case RSP_NO_FAILURE:
4978 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4979 "0715 Task Mgmt No Failure\n");
4980 ret = SUCCESS;
4981 break;
4982 case RSP_TM_NOT_SUPPORTED:
4983 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4984 "0716 Task Mgmt Target "
4985 "reject\n");
4986 break;
4987 case RSP_TM_NOT_COMPLETED:
4988 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4989 "0717 Task Mgmt Target "
4990 "failed TM\n");
4991 break;
4992 case RSP_TM_INVALID_LU:
4993 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4994 "0718 Task Mgmt to invalid "
4995 "LUN\n");
4996 break;
4997 }
4998 }
4999 }
5000 return ret;
5001 }
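
/*
 * Reference sketch (not driver code): how the fields consumed above map
 * onto the FCP_RSP payload. Names follow struct fcp_rsp; the RSP_* codes
 * are the FCP spec response codes.
 *
 *   rspStatus2 & RSP_LEN_VALID -> the FCP_RSP_LEN field is valid
 *   rspRspLen (big endian)     -> valid FCP_RSP_INFO bytes (4 or 8)
 *   rspInfo3                   -> RSP_CODE: RSP_NO_FAILURE,
 *                                 RSP_TM_NOT_SUPPORTED,
 *                                 RSP_TM_NOT_COMPLETED, RSP_TM_INVALID_LU
 */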
5002
/**
 * lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler
 * @vport: The virtual port for which this call is being executed.
 * @cmnd: Pointer to scsi_cmnd data structure.
 * @tgt_id: Target ID of the remote device.
 * @lun_id: LUN number for the TMF.
 * @task_mgmt_cmd: Type of TMF to send.
 *
 * This routine builds and sends a TMF (SCSI Task Management Function)
 * to a remote port and waits synchronously for the response.
 *
 * Return code:
 *   SUCCESS       - the TMF completed successfully
 *   FAILED        - the TMF could not be sent or the target rejected it
 *   TIMEOUT_ERROR - the TMF timed out (the buffer is left for the
 *                   completion handler to release)
 **/
5019 static int
5020 lpfc_send_taskmgmt(struct lpfc_vport *vport, struct scsi_cmnd *cmnd,
5021 unsigned int tgt_id, uint64_t lun_id,
5022 uint8_t task_mgmt_cmd)
5023 {
5024 struct lpfc_hba *phba = vport->phba;
5025 struct lpfc_io_buf *lpfc_cmd;
5026 struct lpfc_iocbq *iocbq;
5027 struct lpfc_iocbq *iocbqrsp;
5028 struct lpfc_rport_data *rdata;
5029 struct lpfc_nodelist *pnode;
5030 int ret;
5031 int status;
5032
5033 rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
5034 if (!rdata || !rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode))
5035 return FAILED;
5036 pnode = rdata->pnode;
5037
5038 lpfc_cmd = lpfc_get_scsi_buf(phba, pnode, NULL);
5039 if (lpfc_cmd == NULL)
5040 return FAILED;
5041 lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo;
5042 lpfc_cmd->rdata = rdata;
5043 lpfc_cmd->pCmd = cmnd;
5044 lpfc_cmd->ndlp = pnode;
5045
5046 status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
5047 task_mgmt_cmd);
5048 if (!status) {
5049 lpfc_release_scsi_buf(phba, lpfc_cmd);
5050 return FAILED;
5051 }
5052
5053 iocbq = &lpfc_cmd->cur_iocbq;
5054 iocbqrsp = lpfc_sli_get_iocbq(phba);
5055 if (iocbqrsp == NULL) {
5056 lpfc_release_scsi_buf(phba, lpfc_cmd);
5057 return FAILED;
5058 }
5059 iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
5060
5061 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5062 "0702 Issue %s to TGT %d LUN %llu "
5063 "rpi x%x nlp_flag x%x Data: x%x x%x\n",
5064 lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
5065 pnode->nlp_rpi, pnode->nlp_flag, iocbq->sli4_xritag,
5066 iocbq->iocb_flag);
5067
5068 status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
5069 iocbq, iocbqrsp, lpfc_cmd->timeout);
5070 if ((status != IOCB_SUCCESS) ||
5071 (iocbqrsp->iocb.ulpStatus != IOSTAT_SUCCESS)) {
5072 if (status != IOCB_SUCCESS ||
5073 iocbqrsp->iocb.ulpStatus != IOSTAT_FCP_RSP_ERROR)
5074 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5075 "0727 TMF %s to TGT %d LUN %llu "
5076 "failed (%d, %d) iocb_flag x%x\n",
5077 lpfc_taskmgmt_name(task_mgmt_cmd),
5078 tgt_id, lun_id,
5079 iocbqrsp->iocb.ulpStatus,
5080 iocbqrsp->iocb.un.ulpWord[4],
5081 iocbq->iocb_flag);
5082
5083 if (status == IOCB_SUCCESS) {
5084 if (iocbqrsp->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
/* Something in the FCP_RSP indicated an error;
 * examine the response code.
 */
5087 ret = lpfc_check_fcp_rsp(vport, lpfc_cmd);
5088 else
5089 ret = FAILED;
5090 } else if (status == IOCB_TIMEDOUT) {
5091 ret = TIMEOUT_ERROR;
5092 } else {
5093 ret = FAILED;
5094 }
5095 } else
5096 ret = SUCCESS;
5097
5098 lpfc_sli_release_iocbq(phba, iocbqrsp);
5099
5100 if (ret != TIMEOUT_ERROR)
5101 lpfc_release_scsi_buf(phba, lpfc_cmd);
5102
5103 return ret;
5104 }
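
/*
 * Usage sketch (illustrative only): the reset handlers below drive this
 * routine. A LUN reset, for example, reduces to:
 *
 *   status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id,
 *                               FCP_LUN_RESET);
 *   if (status == SUCCESS)
 *       status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
 *                                            LPFC_CTX_LUN);
 *
 * On TIMEOUT_ERROR the lpfc_io_buf is deliberately not released above;
 * the completion handler still owns it.
 */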
5105
/**
 * lpfc_chk_tgt_mapped - Check whether the scsi target is mapped
 * @vport: The virtual port on which the target lives.
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine waits until the remote port behind @cmnd reaches the
 * MAPPED state, polling every 500 ms for up to 2 * devloss_tmo.
 *
 * Return code:
 *   SUCCESS - the target is mapped and ready for I/O
 *   FAILED  - the target never became mapped within the window
 **/
5118 static int
5119 lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
5120 {
5121 struct lpfc_rport_data *rdata;
5122 struct lpfc_nodelist *pnode;
5123 unsigned long later;
5124
5125 rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
5126 if (!rdata) {
5127 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5128 "0797 Tgt Map rport failure: rdata x%px\n", rdata);
5129 return FAILED;
5130 }
5131 pnode = rdata->pnode;
5132
/*
 * If the target is not in the MAPPED state, delay until the
 * target is rediscovered or the devloss timeout expires.
 */
5136 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
5137 while (time_after(later, jiffies)) {
5138 if (!pnode || !NLP_CHK_NODE_ACT(pnode))
5139 return FAILED;
5140 if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
5141 return SUCCESS;
5142 schedule_timeout_uninterruptible(msecs_to_jiffies(500));
5143 rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
5144 if (!rdata)
5145 return FAILED;
5146 pnode = rdata->pnode;
5147 }
5148 if (!pnode || !NLP_CHK_NODE_ACT(pnode) ||
5149 (pnode->nlp_state != NLP_STE_MAPPED_NODE))
5150 return FAILED;
5151 return SUCCESS;
5152 }
5153
/**
 * lpfc_reset_flush_io_context - Flush the I/O context after a reset
 * @vport: The virtual port for which this call is being executed.
 * @tgt_id: Target ID of the reset context.
 * @lun_id: LUN number of the reset context.
 * @context: Scope of the flush (LPFC_CTX_LUN, LPFC_CTX_TGT or
 *           LPFC_CTX_HOST).
 *
 * This routine aborts any I/O still outstanding for the given context
 * after a reset, then polls every 20 ms, for up to 2 * devloss_tmo,
 * until the outstanding count drops to zero.
 *
 * Return code:
 *   SUCCESS - all matching I/O completed within the window
 *   FAILED  - I/O remained outstanding when the window expired
 **/
5170 static int
5171 lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
5172 uint64_t lun_id, lpfc_ctx_cmd context)
5173 {
5174 struct lpfc_hba *phba = vport->phba;
5175 unsigned long later;
5176 int cnt;
5177
5178 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
5179 if (cnt)
5180 lpfc_sli_abort_taskmgmt(vport,
5181 &phba->sli.sli3_ring[LPFC_FCP_RING],
5182 tgt_id, lun_id, context);
5183 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
5184 while (time_after(later, jiffies) && cnt) {
5185 schedule_timeout_uninterruptible(msecs_to_jiffies(20));
5186 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
5187 }
5188 if (cnt) {
5189 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5190 "0724 I/O flush failure for context %s : cnt x%x\n",
5191 ((context == LPFC_CTX_LUN) ? "LUN" :
5192 ((context == LPFC_CTX_TGT) ? "TGT" :
5193 ((context == LPFC_CTX_HOST) ? "HOST" : "Unknown"))),
5194 cnt);
5195 return FAILED;
5196 }
5197 return SUCCESS;
5198 }
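
/*
 * Timing note (illustrative): with the default devloss_tmo of 30 seconds,
 * the loop above re-counts outstanding I/Os every 20 ms for up to 60
 * seconds before declaring the flush failed.
 */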
5199
/**
 * lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine resets the LUN behind @cmnd by sending an FCP_LUN_RESET
 * task management command.
 *
 * Return code:
 *   SUCCESS - the LUN reset and subsequent I/O flush succeeded
 *   FAILED  - the reset could not be issued or did not complete
 **/
5211 static int
5212 lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
5213 {
5214 struct Scsi_Host *shost = cmnd->device->host;
5215 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5216 struct lpfc_rport_data *rdata;
5217 struct lpfc_nodelist *pnode;
5218 unsigned tgt_id = cmnd->device->id;
5219 uint64_t lun_id = cmnd->device->lun;
5220 struct lpfc_scsi_event_header scsi_event;
5221 int status;
5222
5223 rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
5224 if (!rdata || !rdata->pnode) {
5225 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5226 "0798 Device Reset rdata failure: rdata x%px\n",
5227 rdata);
5228 return FAILED;
5229 }
5230 pnode = rdata->pnode;
5231 status = fc_block_scsi_eh(cmnd);
5232 if (status != 0 && status != SUCCESS)
5233 return status;
5234
5235 status = lpfc_chk_tgt_mapped(vport, cmnd);
5236 if (status == FAILED) {
5237 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5238 "0721 Device Reset rport failure: rdata x%px\n", rdata);
5239 return FAILED;
5240 }
5241
5242 scsi_event.event_type = FC_REG_SCSI_EVENT;
5243 scsi_event.subcategory = LPFC_EVENT_LUNRESET;
5244 scsi_event.lun = lun_id;
5245 memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
5246 memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
5247
5248 fc_host_post_vendor_event(shost, fc_get_event_number(),
5249 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
5250
5251 status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id,
5252 FCP_LUN_RESET);
5253
5254 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5255 "0713 SCSI layer issued Device Reset (%d, %llu) "
5256 "return x%x\n", tgt_id, lun_id, status);
5257
/*
 * The LUN itself may have reset successfully, but I/O queued in the
 * driver for it can still be outstanding; flush that context and only
 * report success once it is clean.
 */
5264 if (status == SUCCESS)
5265 status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
5266 LPFC_CTX_LUN);
5267
5268 return status;
5269 }
5270
/**
 * lpfc_target_reset_handler - scsi_host_template eh_target_reset entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine resets the target behind @cmnd by sending an
 * FCP_TARGET_RESET task management command.
 *
 * Return code:
 *   SUCCESS      - the target reset and subsequent I/O flush succeeded
 *   FAST_IO_FAIL - the target is unmapped; outstanding I/O was flushed
 *   FAILED       - the reset could not be issued or did not complete
 **/
5282 static int
5283 lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
5284 {
5285 struct Scsi_Host *shost = cmnd->device->host;
5286 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5287 struct lpfc_rport_data *rdata;
5288 struct lpfc_nodelist *pnode;
5289 unsigned tgt_id = cmnd->device->id;
5290 uint64_t lun_id = cmnd->device->lun;
5291 struct lpfc_scsi_event_header scsi_event;
5292 int status;
5293
5294 rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
5295 if (!rdata || !rdata->pnode) {
5296 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5297 "0799 Target Reset rdata failure: rdata x%px\n",
5298 rdata);
5299 return FAILED;
5300 }
5301 pnode = rdata->pnode;
5302 status = fc_block_scsi_eh(cmnd);
5303 if (status != 0 && status != SUCCESS)
5304 return status;
5305
5306 status = lpfc_chk_tgt_mapped(vport, cmnd);
5307 if (status == FAILED) {
5308 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5309 "0722 Target Reset rport failure: rdata x%px\n", rdata);
5310 if (pnode) {
5311 spin_lock_irq(shost->host_lock);
5312 pnode->nlp_flag &= ~NLP_NPR_ADISC;
5313 pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
5314 spin_unlock_irq(shost->host_lock);
5315 }
5316 lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
5317 LPFC_CTX_TGT);
5318 return FAST_IO_FAIL;
5319 }
5320
5321 scsi_event.event_type = FC_REG_SCSI_EVENT;
5322 scsi_event.subcategory = LPFC_EVENT_TGTRESET;
5323 scsi_event.lun = 0;
5324 memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
5325 memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
5326
5327 fc_host_post_vendor_event(shost, fc_get_event_number(),
5328 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
5329
5330 status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id,
5331 FCP_TARGET_RESET);
5332
5333 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5334 "0723 SCSI layer issued Target Reset (%d, %llu) "
5335 "return x%x\n", tgt_id, lun_id, status);
5336
/*
 * The target may have reset successfully, but I/O queued in the driver
 * for it can still be outstanding; flush that context and only report
 * success once it is clean.
 */
5343 if (status == SUCCESS)
5344 status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
5345 LPFC_CTX_TGT);
5346 return status;
5347 }
5348
/**
 * lpfc_bus_reset_handler - scsi_host_template eh_bus_reset_handler entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine issues a target reset to every mapped FCP target known
 * to @cmnd->device->host and then flushes all remaining I/O.
 *
 * Return code:
 *   SUCCESS - every target reset and the final flush succeeded
 *   FAILED  - at least one reset or the flush failed
 **/
5360 static int
5361 lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
5362 {
5363 struct Scsi_Host *shost = cmnd->device->host;
5364 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5365 struct lpfc_nodelist *ndlp = NULL;
5366 struct lpfc_scsi_event_header scsi_event;
5367 int match;
5368 int ret = SUCCESS, status, i;
5369
5370 scsi_event.event_type = FC_REG_SCSI_EVENT;
5371 scsi_event.subcategory = LPFC_EVENT_BUSRESET;
5372 scsi_event.lun = 0;
5373 memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name));
5374 memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name));
5375
5376 fc_host_post_vendor_event(shost, fc_get_event_number(),
5377 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
5378
5379 status = fc_block_scsi_eh(cmnd);
5380 if (status != 0 && status != SUCCESS)
5381 return status;
5382
/*
 * Since the driver manages a single bus device, reset all targets
 * known to the driver. Should any target reset fail, this routine
 * returns failure to the midlayer.
 */
5388 for (i = 0; i < LPFC_MAX_TARGET; i++) {
/* Search for a mapped node with this target ID */
5390 match = 0;
5391 spin_lock_irq(shost->host_lock);
5392 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
5393 if (!NLP_CHK_NODE_ACT(ndlp))
5394 continue;
5395 if (vport->phba->cfg_fcp2_no_tgt_reset &&
5396 (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE))
5397 continue;
5398 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
5399 ndlp->nlp_sid == i &&
5400 ndlp->rport &&
5401 ndlp->nlp_type & NLP_FCP_TARGET) {
5402 match = 1;
5403 break;
5404 }
5405 }
5406 spin_unlock_irq(shost->host_lock);
5407 if (!match)
5408 continue;
5409
5410 status = lpfc_send_taskmgmt(vport, cmnd,
5411 i, 0, FCP_TARGET_RESET);
5412
5413 if (status != SUCCESS) {
5414 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5415 "0700 Bus Reset on target %d failed\n",
5416 i);
5417 ret = FAILED;
5418 }
5419 }
5420
/*
 * All outstanding txcmplq I/Os should have been aborted by the
 * targets. Unfortunately, some targets do not abide by this,
 * forcing the driver to double-check.
 */
5427 status = lpfc_reset_flush_io_context(vport, 0, 0, LPFC_CTX_HOST);
5428 if (status != SUCCESS)
5429 ret = FAILED;
5430
5431 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5432 "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
5433 return ret;
5434 }
5435
/**
 * lpfc_host_reset_handler - scsi_host_template eh_host_reset_handler entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine resets the HBA: it takes the port offline, restarts the
 * board, and then brings the port back online, unblocking management
 * I/O on both the success and failure paths.
 *
 * Return code:
 *   SUCCESS - the adapter was restarted and brought back online
 *   FAILED  - the restart or re-online step failed
 **/
5452 static int
5453 lpfc_host_reset_handler(struct scsi_cmnd *cmnd)
5454 {
5455 struct Scsi_Host *shost = cmnd->device->host;
5456 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5457 struct lpfc_hba *phba = vport->phba;
5458 int rc, ret = SUCCESS;
5459
5460 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5461 "3172 SCSI layer issued Host Reset Data:\n");
5462
5463 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
5464 lpfc_offline(phba);
5465 rc = lpfc_sli_brdrestart(phba);
5466 if (rc)
5467 goto error;
5468
5469 rc = lpfc_online(phba);
5470 if (rc)
5471 goto error;
5472
5473 lpfc_unblock_mgmt_io(phba);
5474
5475 return ret;
5476 error:
5477 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5478 "3323 Failed host reset\n");
5479 lpfc_unblock_mgmt_io(phba);
5480 return FAILED;
5481 }
5482
/**
 * lpfc_slave_alloc - scsi_host_template slave_alloc entry point
 * @sdev: Pointer to the scsi_device being allocated.
 *
 * This routine attaches per-device driver data to @sdev (OAS device
 * data when cfg_fof is set, otherwise the rport data) and, on SLI-3
 * adapters only, grows the global pool of preallocated scsi buffers
 * to cover the new device's queue depth.
 *
 * Return code:
 *   0       - Success
 *   -ENXIO  - the remote port is not ready
 *   -ENOMEM - OAS device data could not be allocated
 **/
5496 static int
5497 lpfc_slave_alloc(struct scsi_device *sdev)
5498 {
5499 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
5500 struct lpfc_hba *phba = vport->phba;
5501 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
5502 uint32_t total = 0;
5503 uint32_t num_to_alloc = 0;
5504 int num_allocated = 0;
5505 uint32_t sdev_cnt;
5506 struct lpfc_device_data *device_data;
5507 unsigned long flags;
5508 struct lpfc_name target_wwpn;
5509
5510 if (!rport || fc_remote_port_chkready(rport))
5511 return -ENXIO;
5512
5513 if (phba->cfg_fof) {
/*
 * Check whether the device data structure for this lun already
 * exists. If not, create one.
 */
5520 u64_to_wwn(rport->port_name, target_wwpn.u.wwn);
5521 spin_lock_irqsave(&phba->devicelock, flags);
5522 device_data = __lpfc_get_device_data(phba,
5523 &phba->luns,
5524 &vport->fc_portname,
5525 &target_wwpn,
5526 sdev->lun);
5527 if (!device_data) {
5528 spin_unlock_irqrestore(&phba->devicelock, flags);
5529 device_data = lpfc_create_device_data(phba,
5530 &vport->fc_portname,
5531 &target_wwpn,
5532 sdev->lun,
5533 phba->cfg_XLanePriority,
5534 true);
5535 if (!device_data)
5536 return -ENOMEM;
5537 spin_lock_irqsave(&phba->devicelock, flags);
5538 list_add_tail(&device_data->listentry, &phba->luns);
5539 }
5540 device_data->rport_data = rport->dd_data;
5541 device_data->available = true;
5542 spin_unlock_irqrestore(&phba->devicelock, flags);
5543 sdev->hostdata = device_data;
5544 } else {
5545 sdev->hostdata = rport->dd_data;
5546 }
5547 sdev_cnt = atomic_inc_return(&phba->sdev_cnt);
5548
/* For SLI-4, all I/O buffers are preallocated; nothing more to do */
5550 if (phba->sli_rev == LPFC_SLI_REV4)
5551 return 0;
5552
/* The remainder of this routine applies to SLI-3 adapters only */

/*
 * Populate this host's globally available list of scsi buffers.
 * Don't allocate more than the HBA limit conveyed to the midlayer
 * via the host structure. The formula accounts for lun_queue_depth
 * plus two extra buffers per device for error handling. This list
 * of scsi bufs exists for the lifetime of the driver.
 */
5562 total = phba->total_scsi_bufs;
5563 num_to_alloc = vport->cfg_lun_queue_depth + 2;
5564
/* If the already-allocated buffers are enough, just return */
5566 if ((sdev_cnt * (vport->cfg_lun_queue_depth + 2)) < total)
5567 return 0;
5568
/* Allow some exchanges to always be available to complete discovery */
if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
5571 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5572 "0704 At limitation of %d preallocated "
5573 "command buffers\n", total);
5574 return 0;
5575
5576 } else if (total + num_to_alloc >
phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
5578 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5579 "0705 Allocation request of %d "
5580 "command buffers will exceed max of %d. "
5581 "Reducing allocation request to %d.\n",
5582 num_to_alloc, phba->cfg_hba_queue_depth,
5583 (phba->cfg_hba_queue_depth - total));
5584 num_to_alloc = phba->cfg_hba_queue_depth - total;
5585 }
5586 num_allocated = lpfc_new_scsi_buf_s3(vport, num_to_alloc);
5587 if (num_to_alloc != num_allocated) {
5588 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5589 "0708 Allocation request of %d "
5590 "command buffers did not succeed. "
5591 "Allocated %d buffers.\n",
5592 num_to_alloc, num_allocated);
5593 }
5594 if (num_allocated > 0)
5595 phba->total_scsi_bufs += num_allocated;
5596 return 0;
5597 }
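
/*
 * Worked example (hypothetical numbers): with cfg_lun_queue_depth = 30,
 * each new SLI-3 scsi_device asks for num_to_alloc = 32 buffers. If four
 * devices already exist and total_scsi_bufs = 200, the early-return test
 * above is 5 * 32 = 160 < 200, so no new buffers are allocated until
 * enough devices exist to consume the preallocated pool.
 */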
5598
/**
 * lpfc_slave_configure - scsi_host_template slave_configure entry point
 * @sdev: Pointer to the scsi_device being configured.
 *
 * This routine sets the device queue depth to cfg_lun_queue_depth and,
 * when FCP ring polling is enabled, kicks the fast-ring event handler
 * and rearms the poll timer.
 *
 * Return code:
 *   0 - Success
 **/
5610 static int
5611 lpfc_slave_configure(struct scsi_device *sdev)
5612 {
5613 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
5614 struct lpfc_hba *phba = vport->phba;
5615
5616 scsi_change_queue_depth(sdev, vport->cfg_lun_queue_depth);
5617
5618 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
5619 lpfc_sli_handle_fast_ring_event(phba,
5620 &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
5621 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
5622 lpfc_poll_rearm_timer(phba);
5623 }
5624
5625 return 0;
5626 }
5627
/**
 * lpfc_slave_destroy - slave_destroy entry point of SHT data structure
 * @sdev: Pointer to the scsi_device being destroyed.
 *
 * This routine drops the device count, releases OAS device data that is
 * no longer needed, and clears @sdev->hostdata.
 **/
5634 static void
5635 lpfc_slave_destroy(struct scsi_device *sdev)
5636 {
5637 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
5638 struct lpfc_hba *phba = vport->phba;
5639 unsigned long flags;
5640 struct lpfc_device_data *device_data = sdev->hostdata;
5641
5642 atomic_dec(&phba->sdev_cnt);
5643 if ((phba->cfg_fof) && (device_data)) {
5644 spin_lock_irqsave(&phba->devicelock, flags);
5645 device_data->available = false;
5646 if (!device_data->oas_enabled)
5647 lpfc_delete_device_data(phba, device_data);
5648 spin_unlock_irqrestore(&phba->devicelock, flags);
5649 }
5650 sdev->hostdata = NULL;
5651 return;
5652 }
5653
/**
 * lpfc_create_device_data - creates and initializes device data structure for OAS
 * @phba: Pointer to host bus adapter structure.
 * @vport_wwpn: Pointer to vport's wwpn information.
 * @target_wwpn: Pointer to target's wwpn information.
 * @lun: LUN on the target.
 * @pri: Priority.
 * @atomic_create: Flag indicating whether memory should be allocated
 *                 with GFP_ATOMIC (true) or GFP_KERNEL (false).
 *
 * This routine creates a device data structure containing identifying
 * information for the device (vport wwpn, target wwpn, lun), the OAS
 * state, whether the lun is available to the system, and a pointer to
 * the rport data.
 *
 * Return code:
 *   NULL - Error
 *   Pointer to lpfc_device_data - Success
 **/
5672 struct lpfc_device_data*
5673 lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
5674 struct lpfc_name *target_wwpn, uint64_t lun,
5675 uint32_t pri, bool atomic_create)
5676 {
5677
5678 struct lpfc_device_data *lun_info;
5679 int memory_flags;
5680
5681 if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
5682 !(phba->cfg_fof))
5683 return NULL;
5684
/* Attempt to create the device data to contain the lun info */
5687 if (atomic_create)
5688 memory_flags = GFP_ATOMIC;
5689 else
5690 memory_flags = GFP_KERNEL;
5691 lun_info = mempool_alloc(phba->device_data_mem_pool, memory_flags);
5692 if (!lun_info)
5693 return NULL;
5694 INIT_LIST_HEAD(&lun_info->listentry);
5695 lun_info->rport_data = NULL;
5696 memcpy(&lun_info->device_id.vport_wwpn, vport_wwpn,
5697 sizeof(struct lpfc_name));
5698 memcpy(&lun_info->device_id.target_wwpn, target_wwpn,
5699 sizeof(struct lpfc_name));
5700 lun_info->device_id.lun = lun;
5701 lun_info->oas_enabled = false;
5702 lun_info->priority = pri;
5703 lun_info->available = false;
5704 return lun_info;
5705 }
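
/*
 * Usage sketch (illustrative): callers that can sleep allocate outside
 * phba->devicelock and take the lock only to link the new node, as
 * lpfc_slave_alloc() does above:
 *
 *   device_data = lpfc_create_device_data(phba, &vport->fc_portname,
 *                                         &target_wwpn, sdev->lun,
 *                                         phba->cfg_XLanePriority, true);
 *   if (device_data) {
 *       spin_lock_irqsave(&phba->devicelock, flags);
 *       list_add_tail(&device_data->listentry, &phba->luns);
 *       spin_unlock_irqrestore(&phba->devicelock, flags);
 *   }
 */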
5706
/**
 * lpfc_delete_device_data - frees a device data structure for OAS
 * @phba: Pointer to host bus adapter structure.
 * @lun_info: Pointer to the device data structure to free.
 *
 * This routine unlinks (if necessary) and frees the device data
 * structure passed in.
 **/
5715 void
5716 lpfc_delete_device_data(struct lpfc_hba *phba,
5717 struct lpfc_device_data *lun_info)
5718 {
5719
5720 if (unlikely(!phba) || !lun_info ||
5721 !(phba->cfg_fof))
5722 return;
5723
5724 if (!list_empty(&lun_info->listentry))
5725 list_del(&lun_info->listentry);
5726 mempool_free(lun_info, phba->device_data_mem_pool);
5727 return;
5728 }
5729
/**
 * __lpfc_get_device_data - returns the device data for the specified lun
 * @phba: Pointer to host bus adapter structure.
 * @list: Pointer to the list to search.
 * @vport_wwpn: Pointer to vport's wwpn information.
 * @target_wwpn: Pointer to target's wwpn information.
 * @lun: LUN on the target.
 *
 * This routine searches the given list for the specified lun's device
 * data. It takes no locks; the caller is responsible for holding the
 * proper lock on the list (normally phba->devicelock).
 *
 * Return code:
 *   NULL - Error
 *   Pointer to lpfc_device_data - Success
 **/
5746 struct lpfc_device_data*
5747 __lpfc_get_device_data(struct lpfc_hba *phba, struct list_head *list,
5748 struct lpfc_name *vport_wwpn,
5749 struct lpfc_name *target_wwpn, uint64_t lun)
5750 {
5751
5752 struct lpfc_device_data *lun_info;
5753
5754 if (unlikely(!phba) || !list || !vport_wwpn || !target_wwpn ||
5755 !phba->cfg_fof)
5756 return NULL;
5757
5758
5759
5760 list_for_each_entry(lun_info, list, listentry) {
5761 if ((memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
5762 sizeof(struct lpfc_name)) == 0) &&
5763 (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
5764 sizeof(struct lpfc_name)) == 0) &&
5765 (lun_info->device_id.lun == lun))
5766 return lun_info;
5767 }
5768
5769 return NULL;
5770 }
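
/*
 * Caller pattern (illustrative): the leading underscores mark this as an
 * unlocked helper, so lookups are bracketed by phba->devicelock:
 *
 *   spin_lock_irqsave(&phba->devicelock, flags);
 *   lun_info = __lpfc_get_device_data(phba, &phba->luns, vport_wwpn,
 *                                     target_wwpn, lun);
 *   ...
 *   spin_unlock_irqrestore(&phba->devicelock, flags);
 */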
5771
/**
 * lpfc_find_next_oas_lun - searches for the next OAS lun
 * @phba: Pointer to host bus adapter structure.
 * @vport_wwpn: Pointer to vport's wwpn information.
 * @target_wwpn: Pointer to target's wwpn information.
 * @starting_lun: Pointer to the lun at which to start searching.
 * @found_vport_wwpn: Pointer to the found lun's vport wwpn information.
 * @found_target_wwpn: Pointer to the found lun's target wwpn information.
 * @found_lun: Pointer to the found lun.
 * @found_lun_status: Pointer to the status of the found lun.
 * @found_lun_pri: Pointer to the priority of the found lun.
 *
 * This routine searches the luns list for the specified lun, or for the
 * first lun of the vport/target. A zero vport wwpn matches any vport,
 * and a zero target wwpn matches any target. When the lun is found, its
 * lun number, vport wwpn, target wwpn, status and priority are returned
 * through the found_* parameters, and *starting_lun is advanced to the
 * next OAS lun if one exists, or set to NO_MORE_OAS_LUN otherwise.
 *
 * Return code:
 *   true  - a matching OAS lun was found
 *   false - no match (or invalid arguments)
 **/
5798 bool
5799 lpfc_find_next_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
5800 struct lpfc_name *target_wwpn, uint64_t *starting_lun,
5801 struct lpfc_name *found_vport_wwpn,
5802 struct lpfc_name *found_target_wwpn,
5803 uint64_t *found_lun,
5804 uint32_t *found_lun_status,
5805 uint32_t *found_lun_pri)
5806 {
5807
5808 unsigned long flags;
5809 struct lpfc_device_data *lun_info;
5810 struct lpfc_device_id *device_id;
5811 uint64_t lun;
5812 bool found = false;
5813
5814 if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
5815 !starting_lun || !found_vport_wwpn ||
5816 !found_target_wwpn || !found_lun || !found_lun_status ||
5817 (*starting_lun == NO_MORE_OAS_LUN) ||
5818 !phba->cfg_fof)
5819 return false;
5820
5821 lun = *starting_lun;
5822 *found_lun = NO_MORE_OAS_LUN;
5823 *starting_lun = NO_MORE_OAS_LUN;
5824
/* Search for the specified lun, or the first OAS lun on the list */
5827 spin_lock_irqsave(&phba->devicelock, flags);
5828 list_for_each_entry(lun_info, &phba->luns, listentry) {
5829 if (((wwn_to_u64(vport_wwpn->u.wwn) == 0) ||
5830 (memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
5831 sizeof(struct lpfc_name)) == 0)) &&
5832 ((wwn_to_u64(target_wwpn->u.wwn) == 0) ||
5833 (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
5834 sizeof(struct lpfc_name)) == 0)) &&
5835 (lun_info->oas_enabled)) {
5836 device_id = &lun_info->device_id;
5837 if ((!found) &&
5838 ((lun == FIND_FIRST_OAS_LUN) ||
5839 (device_id->lun == lun))) {
5840 *found_lun = device_id->lun;
5841 memcpy(found_vport_wwpn,
5842 &device_id->vport_wwpn,
5843 sizeof(struct lpfc_name));
5844 memcpy(found_target_wwpn,
5845 &device_id->target_wwpn,
5846 sizeof(struct lpfc_name));
5847 if (lun_info->available)
5848 *found_lun_status =
5849 OAS_LUN_STATUS_EXISTS;
5850 else
5851 *found_lun_status = 0;
5852 *found_lun_pri = lun_info->priority;
5853 if (phba->cfg_oas_flags & OAS_FIND_ANY_VPORT)
5854 memset(vport_wwpn, 0x0,
5855 sizeof(struct lpfc_name));
5856 if (phba->cfg_oas_flags & OAS_FIND_ANY_TARGET)
5857 memset(target_wwpn, 0x0,
5858 sizeof(struct lpfc_name));
5859 found = true;
5860 } else if (found) {
5861 *starting_lun = device_id->lun;
5862 memcpy(vport_wwpn, &device_id->vport_wwpn,
5863 sizeof(struct lpfc_name));
5864 memcpy(target_wwpn, &device_id->target_wwpn,
5865 sizeof(struct lpfc_name));
5866 break;
5867 }
5868 }
5869 }
5870 spin_unlock_irqrestore(&phba->devicelock, flags);
5871 return found;
5872 }
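
/*
 * Iteration sketch (illustrative): *starting_lun is the cursor. Begin at
 * FIND_FIRST_OAS_LUN and keep calling; the routine stores the next lun,
 * or NO_MORE_OAS_LUN, back through the cursor, and returns false once
 * the cursor is exhausted:
 *
 *   uint64_t lun = FIND_FIRST_OAS_LUN;
 *
 *   while (lpfc_find_next_oas_lun(phba, &vport_wwpn, &target_wwpn, &lun,
 *                                 &found_vport_wwpn, &found_target_wwpn,
 *                                 &found_lun, &found_lun_status,
 *                                 &found_lun_pri))
 *       consume(found_lun);   (consume() is a hypothetical caller hook)
 */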
5873
/**
 * lpfc_enable_oas_lun - enables a lun for OAS operations
 * @phba: Pointer to host bus adapter structure.
 * @vport_wwpn: Pointer to vport's wwpn information.
 * @target_wwpn: Pointer to target's wwpn information.
 * @lun: LUN to enable.
 * @pri: Priority.
 *
 * This routine enables a lun for OAS operations by doing the following:
 *
 *   1) Checks whether device data for the lun already exists.
 *   2) If found, sets the OAS enabled flag (if not already set),
 *      updates the priority and returns.
 *   3) Otherwise, creates a new device data structure.
 *   4) If created successfully, marks it OAS enabled and unavailable,
 *      and adds it to the list of luns.
 *
 * Return code:
 *   false - Error
 *   true  - Success
 **/
5894 bool
5895 lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
5896 struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri)
5897 {
5898
5899 struct lpfc_device_data *lun_info;
5900 unsigned long flags;
5901
5902 if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
5903 !phba->cfg_fof)
5904 return false;
5905
5906 spin_lock_irqsave(&phba->devicelock, flags);
5907
/* Check whether device data for the lun has already been created */
5909 lun_info = __lpfc_get_device_data(phba, &phba->luns, vport_wwpn,
5910 target_wwpn, lun);
5911 if (lun_info) {
5912 if (!lun_info->oas_enabled)
5913 lun_info->oas_enabled = true;
5914 lun_info->priority = pri;
5915 spin_unlock_irqrestore(&phba->devicelock, flags);
5916 return true;
5917 }
5918
/* Create a lun info structure and add it to the list of luns */
5920 lun_info = lpfc_create_device_data(phba, vport_wwpn, target_wwpn, lun,
5921 pri, true);
5922 if (lun_info) {
5923 lun_info->oas_enabled = true;
5924 lun_info->priority = pri;
5925 lun_info->available = false;
5926 list_add_tail(&lun_info->listentry, &phba->luns);
5927 spin_unlock_irqrestore(&phba->devicelock, flags);
5928 return true;
5929 }
5930 spin_unlock_irqrestore(&phba->devicelock, flags);
5931 return false;
5932 }
5933
/**
 * lpfc_disable_oas_lun - disables a lun for OAS operations
 * @phba: Pointer to host bus adapter structure.
 * @vport_wwpn: Pointer to vport's wwpn information.
 * @target_wwpn: Pointer to target's wwpn information.
 * @lun: LUN to disable.
 * @pri: Priority.
 *
 * This routine disables a lun for OAS operations by doing the following:
 *
 *   1) Checks whether device data for the lun exists.
 *   2) If present, clears the OAS enabled flag for the lun.
 *   3) If the lun is no longer available to the system, the device
 *      data is freed.
 *
 * Return code:
 *   false - Error
 *   true  - Success
 **/
5953 bool
5954 lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
5955 struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri)
5956 {
5957
5958 struct lpfc_device_data *lun_info;
5959 unsigned long flags;
5960
5961 if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
5962 !phba->cfg_fof)
5963 return false;
5964
5965 spin_lock_irqsave(&phba->devicelock, flags);
5966
/* Check whether the lun is on the OAS list */
5968 lun_info = __lpfc_get_device_data(phba,
5969 &phba->luns, vport_wwpn,
5970 target_wwpn, lun);
5971 if (lun_info) {
5972 lun_info->oas_enabled = false;
5973 lun_info->priority = pri;
5974 if (!lun_info->available)
5975 lpfc_delete_device_data(phba, lun_info);
5976 spin_unlock_irqrestore(&phba->devicelock, flags);
5977 return true;
5978 }
5979
5980 spin_unlock_irqrestore(&phba->devicelock, flags);
5981 return false;
5982 }
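
/*
 * Pairing sketch (illustrative): enable/disable are symmetric. A config
 * path toggling OAS on lun 0 of some target might do:
 *
 *   if (lpfc_enable_oas_lun(phba, &vport_wwpn, &target_wwpn, 0, pri))
 *       ... OAS now applies to that lun ...
 *   if (lpfc_disable_oas_lun(phba, &vport_wwpn, &target_wwpn, 0, pri))
 *       ... the device data is freed here only if the lun is
 *           unavailable; otherwise lpfc_slave_destroy() frees it ...
 */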
5983
5984 static int
5985 lpfc_no_command(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
5986 {
5987 return SCSI_MLQUEUE_HOST_BUSY;
5988 }
5989
5990 static int
5991 lpfc_no_handler(struct scsi_cmnd *cmnd)
5992 {
5993 return FAILED;
5994 }
5995
5996 static int
5997 lpfc_no_slave(struct scsi_device *sdev)
5998 {
5999 return -ENODEV;
6000 }
6001
6002 struct scsi_host_template lpfc_template_nvme = {
6003 .module = THIS_MODULE,
6004 .name = LPFC_DRIVER_NAME,
6005 .proc_name = LPFC_DRIVER_NAME,
6006 .info = lpfc_info,
6007 .queuecommand = lpfc_no_command,
6008 .eh_abort_handler = lpfc_no_handler,
6009 .eh_device_reset_handler = lpfc_no_handler,
6010 .eh_target_reset_handler = lpfc_no_handler,
6011 .eh_bus_reset_handler = lpfc_no_handler,
6012 .eh_host_reset_handler = lpfc_no_handler,
6013 .slave_alloc = lpfc_no_slave,
6014 .slave_configure = lpfc_no_slave,
6015 .scan_finished = lpfc_scan_finished,
6016 .this_id = -1,
6017 .sg_tablesize = 1,
6018 .cmd_per_lun = 1,
6019 .shost_attrs = lpfc_hba_attrs,
6020 .max_sectors = 0xFFFF,
6021 .vendor_id = LPFC_NL_VENDOR_ID,
6022 .track_queue_depth = 0,
6023 };
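
/*
 * lpfc_template_nvme deliberately stubs every SCSI entry point with the
 * lpfc_no_* handlers above: when a port runs NVMe-only, a SCSI host is
 * still registered, but any SCSI activity is refused (HOST_BUSY, FAILED
 * or -ENODEV). The templates that follow are the real SCSI variants.
 */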
6024
6025 struct scsi_host_template lpfc_template_no_hr = {
6026 .module = THIS_MODULE,
6027 .name = LPFC_DRIVER_NAME,
6028 .proc_name = LPFC_DRIVER_NAME,
6029 .info = lpfc_info,
6030 .queuecommand = lpfc_queuecommand,
6031 .eh_timed_out = fc_eh_timed_out,
6032 .eh_abort_handler = lpfc_abort_handler,
6033 .eh_device_reset_handler = lpfc_device_reset_handler,
6034 .eh_target_reset_handler = lpfc_target_reset_handler,
6035 .eh_bus_reset_handler = lpfc_bus_reset_handler,
6036 .slave_alloc = lpfc_slave_alloc,
6037 .slave_configure = lpfc_slave_configure,
6038 .slave_destroy = lpfc_slave_destroy,
6039 .scan_finished = lpfc_scan_finished,
6040 .this_id = -1,
6041 .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
6042 .cmd_per_lun = LPFC_CMD_PER_LUN,
6043 .shost_attrs = lpfc_hba_attrs,
.max_sectors = 0xFFFF,
6045 .vendor_id = LPFC_NL_VENDOR_ID,
6046 .change_queue_depth = scsi_change_queue_depth,
6047 .track_queue_depth = 1,
6048 };
6049
6050 struct scsi_host_template lpfc_template = {
6051 .module = THIS_MODULE,
6052 .name = LPFC_DRIVER_NAME,
6053 .proc_name = LPFC_DRIVER_NAME,
6054 .info = lpfc_info,
6055 .queuecommand = lpfc_queuecommand,
6056 .eh_timed_out = fc_eh_timed_out,
6057 .eh_abort_handler = lpfc_abort_handler,
6058 .eh_device_reset_handler = lpfc_device_reset_handler,
6059 .eh_target_reset_handler = lpfc_target_reset_handler,
6060 .eh_bus_reset_handler = lpfc_bus_reset_handler,
6061 .eh_host_reset_handler = lpfc_host_reset_handler,
6062 .slave_alloc = lpfc_slave_alloc,
6063 .slave_configure = lpfc_slave_configure,
6064 .slave_destroy = lpfc_slave_destroy,
6065 .scan_finished = lpfc_scan_finished,
6066 .this_id = -1,
6067 .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
6068 .cmd_per_lun = LPFC_CMD_PER_LUN,
6069 .shost_attrs = lpfc_hba_attrs,
6070 .max_sectors = 0xFFFF,
6071 .vendor_id = LPFC_NL_VENDOR_ID,
6072 .change_queue_depth = scsi_change_queue_depth,
6073 .track_queue_depth = 1,
6074 };
6075
6076 struct scsi_host_template lpfc_vport_template = {
6077 .module = THIS_MODULE,
6078 .name = LPFC_DRIVER_NAME,
6079 .proc_name = LPFC_DRIVER_NAME,
6080 .info = lpfc_info,
6081 .queuecommand = lpfc_queuecommand,
6082 .eh_timed_out = fc_eh_timed_out,
6083 .eh_abort_handler = lpfc_abort_handler,
6084 .eh_device_reset_handler = lpfc_device_reset_handler,
6085 .eh_target_reset_handler = lpfc_target_reset_handler,
6086 .slave_alloc = lpfc_slave_alloc,
6087 .slave_configure = lpfc_slave_configure,
6088 .slave_destroy = lpfc_slave_destroy,
6089 .scan_finished = lpfc_scan_finished,
6090 .this_id = -1,
6091 .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
6092 .cmd_per_lun = LPFC_CMD_PER_LUN,
6093 .shost_attrs = lpfc_vport_attrs,
6094 .max_sectors = 0xFFFF,
6095 .change_queue_depth = scsi_change_queue_depth,
6096 .track_queue_depth = 1,
6097 };