root/drivers/scsi/qla2xxx/qla_iocb.c

/* [<][>][^][v][top][bottom][index][help] */

DEFINITIONS

This source file includes following definitions.
  1. qla2x00_get_cmd_direction
  2. qla2x00_calc_iocbs_32
  3. qla2x00_calc_iocbs_64
  4. qla2x00_prep_cont_type0_iocb
  5. qla2x00_prep_cont_type1_iocb
  6. qla24xx_configure_prot_mode
  7. qla2x00_build_scsi_iocbs_32
  8. qla2x00_build_scsi_iocbs_64
  9. qla2xxx_get_next_handle
  10. qla2x00_start_scsi
  11. qla2x00_start_iocbs
  12. __qla2x00_marker
  13. qla2x00_marker
  14. qla2x00_issue_marker
  15. qla24xx_build_scsi_type_6_iocbs
  16. qla24xx_calc_dsd_lists
  17. qla24xx_build_scsi_iocbs
  18. qla24xx_set_t10dif_tags
  19. qla24xx_get_one_block_sg
  20. qla24xx_walk_and_build_sglist_no_difb
  21. qla24xx_walk_and_build_sglist
  22. qla24xx_walk_and_build_prot_sglist
  23. qla24xx_build_scsi_crc_2_iocbs
  24. qla24xx_start_scsi
  25. qla24xx_dif_start_scsi
  26. qla2xxx_start_scsi_mq
  27. qla2xxx_dif_start_scsi_mq
  28. __qla2x00_alloc_iocbs
  29. qla2x00_alloc_iocbs_ready
  30. qla2x00_alloc_iocbs
  31. qla24xx_prli_iocb
  32. qla24xx_login_iocb
  33. qla2x00_login_iocb
  34. qla24xx_logout_iocb
  35. qla2x00_logout_iocb
  36. qla24xx_adisc_iocb
  37. qla2x00_adisc_iocb
  38. qla24xx_tm_iocb
  39. qla2x00_init_timer
  40. qla2x00_els_dcmd_sp_free
  41. qla2x00_els_dcmd_iocb_timeout
  42. qla2x00_els_dcmd_sp_done
  43. qla24xx_els_dcmd_iocb
  44. qla24xx_els_logo_iocb
  45. qla2x00_els_dcmd2_iocb_timeout
  46. qla2x00_els_dcmd2_free
  47. qla2x00_els_dcmd2_sp_done
  48. qla24xx_els_dcmd2_iocb
  49. qla24xx_els_iocb
  50. qla2x00_ct_iocb
  51. qla24xx_ct_iocb
  52. qla82xx_start_scsi
  53. qla24xx_abort_iocb
  54. qla2x00_mb_iocb
  55. qla2x00_ctpthru_cmd_iocb
  56. qla2x00_send_notify_ack_iocb
  57. qla_nvme_ls
  58. qla25xx_ctrlvp_iocb
  59. qla24xx_prlo_iocb
  60. qla2x00_start_sp
  61. qla25xx_build_bidir_iocb
  62. qla2x00_start_bidir

   1 /*
   2  * QLogic Fibre Channel HBA Driver
   3  * Copyright (c)  2003-2014 QLogic Corporation
   4  *
   5  * See LICENSE.qla2xxx for copyright and licensing details.
   6  */
   7 #include "qla_def.h"
   8 #include "qla_target.h"
   9 
  10 #include <linux/blkdev.h>
  11 #include <linux/delay.h>
  12 
  13 #include <scsi/scsi_tcq.h>
  14 
  15 /**
  16  * qla2x00_get_cmd_direction() - Determine control_flag data direction.
  17  * @sp: SCSI command
  18  *
  19  * Returns the proper CF_* direction based on CDB.
  20  */
  21 static inline uint16_t
  22 qla2x00_get_cmd_direction(srb_t *sp)
  23 {
  24         uint16_t cflags;
  25         struct scsi_cmnd *cmd = GET_CMD_SP(sp);
  26         struct scsi_qla_host *vha = sp->vha;
  27 
  28         cflags = 0;
  29 
  30         /* Set transfer direction */
  31         if (cmd->sc_data_direction == DMA_TO_DEVICE) {
  32                 cflags = CF_WRITE;
  33                 vha->qla_stats.output_bytes += scsi_bufflen(cmd);
  34                 vha->qla_stats.output_requests++;
  35         } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
  36                 cflags = CF_READ;
  37                 vha->qla_stats.input_bytes += scsi_bufflen(cmd);
  38                 vha->qla_stats.input_requests++;
  39         }
  40         return (cflags);
  41 }
  42 
  43 /**
  44  * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
  45  * Continuation Type 0 IOCBs to allocate.
  46  *
  47  * @dsds: number of data segment decriptors needed
  48  *
  49  * Returns the number of IOCB entries needed to store @dsds.
  50  */
  51 uint16_t
  52 qla2x00_calc_iocbs_32(uint16_t dsds)
  53 {
  54         uint16_t iocbs;
  55 
  56         iocbs = 1;
  57         if (dsds > 3) {
  58                 iocbs += (dsds - 3) / 7;
  59                 if ((dsds - 3) % 7)
  60                         iocbs++;
  61         }
  62         return (iocbs);
  63 }
  64 
  65 /**
  66  * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
  67  * Continuation Type 1 IOCBs to allocate.
  68  *
  69  * @dsds: number of data segment decriptors needed
  70  *
  71  * Returns the number of IOCB entries needed to store @dsds.
  72  */
  73 uint16_t
  74 qla2x00_calc_iocbs_64(uint16_t dsds)
  75 {
  76         uint16_t iocbs;
  77 
  78         iocbs = 1;
  79         if (dsds > 2) {
  80                 iocbs += (dsds - 2) / 5;
  81                 if ((dsds - 2) % 5)
  82                         iocbs++;
  83         }
  84         return (iocbs);
  85 }
  86 
  87 /**
  88  * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
  89  * @vha: HA context
  90  *
  91  * Returns a pointer to the Continuation Type 0 IOCB packet.
  92  */
  93 static inline cont_entry_t *
  94 qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
  95 {
  96         cont_entry_t *cont_pkt;
  97         struct req_que *req = vha->req;
  98         /* Adjust ring index. */
  99         req->ring_index++;
 100         if (req->ring_index == req->length) {
 101                 req->ring_index = 0;
 102                 req->ring_ptr = req->ring;
 103         } else {
 104                 req->ring_ptr++;
 105         }
 106 
 107         cont_pkt = (cont_entry_t *)req->ring_ptr;
 108 
 109         /* Load packet defaults. */
 110         put_unaligned_le32(CONTINUE_TYPE, &cont_pkt->entry_type);
 111 
 112         return (cont_pkt);
 113 }
 114 
 115 /**
 116  * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 117  * @vha: HA context
 118  * @req: request queue
 119  *
 120  * Returns a pointer to the continuation type 1 IOCB packet.
 121  */
 122 static inline cont_a64_entry_t *
 123 qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
 124 {
 125         cont_a64_entry_t *cont_pkt;
 126 
 127         /* Adjust ring index. */
 128         req->ring_index++;
 129         if (req->ring_index == req->length) {
 130                 req->ring_index = 0;
 131                 req->ring_ptr = req->ring;
 132         } else {
 133                 req->ring_ptr++;
 134         }
 135 
 136         cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
 137 
 138         /* Load packet defaults. */
 139         put_unaligned_le32(IS_QLAFX00(vha->hw) ? CONTINUE_A64_TYPE_FX00 :
 140                            CONTINUE_A64_TYPE, &cont_pkt->entry_type);
 141 
 142         return (cont_pkt);
 143 }
 144 
 145 inline int
 146 qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
 147 {
 148         struct scsi_cmnd *cmd = GET_CMD_SP(sp);
 149         uint8_t guard = scsi_host_get_guard(cmd->device->host);
 150 
 151         /* We always use DIFF Bundling for best performance */
 152         *fw_prot_opts = 0;
 153 
 154         /* Translate SCSI opcode to a protection opcode */
 155         switch (scsi_get_prot_op(cmd)) {
 156         case SCSI_PROT_READ_STRIP:
 157                 *fw_prot_opts |= PO_MODE_DIF_REMOVE;
 158                 break;
 159         case SCSI_PROT_WRITE_INSERT:
 160                 *fw_prot_opts |= PO_MODE_DIF_INSERT;
 161                 break;
 162         case SCSI_PROT_READ_INSERT:
 163                 *fw_prot_opts |= PO_MODE_DIF_INSERT;
 164                 break;
 165         case SCSI_PROT_WRITE_STRIP:
 166                 *fw_prot_opts |= PO_MODE_DIF_REMOVE;
 167                 break;
 168         case SCSI_PROT_READ_PASS:
 169         case SCSI_PROT_WRITE_PASS:
 170                 if (guard & SHOST_DIX_GUARD_IP)
 171                         *fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
 172                 else
 173                         *fw_prot_opts |= PO_MODE_DIF_PASS;
 174                 break;
 175         default:        /* Normal Request */
 176                 *fw_prot_opts |= PO_MODE_DIF_PASS;
 177                 break;
 178         }
 179 
 180         return scsi_prot_sg_count(cmd);
 181 }
 182 
 183 /*
 184  * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 185  * capable IOCB types.
 186  *
 187  * @sp: SRB command to process
 188  * @cmd_pkt: Command type 2 IOCB
 189  * @tot_dsds: Total number of segments to transfer
 190  */
 191 void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
 192     uint16_t tot_dsds)
 193 {
 194         uint16_t        avail_dsds;
 195         struct dsd32    *cur_dsd;
 196         scsi_qla_host_t *vha;
 197         struct scsi_cmnd *cmd;
 198         struct scatterlist *sg;
 199         int i;
 200 
 201         cmd = GET_CMD_SP(sp);
 202 
 203         /* Update entry type to indicate Command Type 2 IOCB */
 204         put_unaligned_le32(COMMAND_TYPE, &cmd_pkt->entry_type);
 205 
 206         /* No data transfer */
 207         if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
 208                 cmd_pkt->byte_count = cpu_to_le32(0);
 209                 return;
 210         }
 211 
 212         vha = sp->vha;
 213         cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
 214 
 215         /* Three DSDs are available in the Command Type 2 IOCB */
 216         avail_dsds = ARRAY_SIZE(cmd_pkt->dsd32);
 217         cur_dsd = cmd_pkt->dsd32;
 218 
 219         /* Load data segments */
 220         scsi_for_each_sg(cmd, sg, tot_dsds, i) {
 221                 cont_entry_t *cont_pkt;
 222 
 223                 /* Allocate additional continuation packets? */
 224                 if (avail_dsds == 0) {
 225                         /*
 226                          * Seven DSDs are available in the Continuation
 227                          * Type 0 IOCB.
 228                          */
 229                         cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
 230                         cur_dsd = cont_pkt->dsd;
 231                         avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
 232                 }
 233 
 234                 append_dsd32(&cur_dsd, sg);
 235                 avail_dsds--;
 236         }
 237 }
 238 
 239 /**
 240  * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 241  * capable IOCB types.
 242  *
 243  * @sp: SRB command to process
 244  * @cmd_pkt: Command type 3 IOCB
 245  * @tot_dsds: Total number of segments to transfer
 246  */
 247 void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
 248     uint16_t tot_dsds)
 249 {
 250         uint16_t        avail_dsds;
 251         struct dsd64    *cur_dsd;
 252         scsi_qla_host_t *vha;
 253         struct scsi_cmnd *cmd;
 254         struct scatterlist *sg;
 255         int i;
 256 
 257         cmd = GET_CMD_SP(sp);
 258 
 259         /* Update entry type to indicate Command Type 3 IOCB */
 260         put_unaligned_le32(COMMAND_A64_TYPE, &cmd_pkt->entry_type);
 261 
 262         /* No data transfer */
 263         if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
 264                 cmd_pkt->byte_count = cpu_to_le32(0);
 265                 return;
 266         }
 267 
 268         vha = sp->vha;
 269         cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
 270 
 271         /* Two DSDs are available in the Command Type 3 IOCB */
 272         avail_dsds = ARRAY_SIZE(cmd_pkt->dsd64);
 273         cur_dsd = cmd_pkt->dsd64;
 274 
 275         /* Load data segments */
 276         scsi_for_each_sg(cmd, sg, tot_dsds, i) {
 277                 cont_a64_entry_t *cont_pkt;
 278 
 279                 /* Allocate additional continuation packets? */
 280                 if (avail_dsds == 0) {
 281                         /*
 282                          * Five DSDs are available in the Continuation
 283                          * Type 1 IOCB.
 284                          */
 285                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
 286                         cur_dsd = cont_pkt->dsd;
 287                         avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
 288                 }
 289 
 290                 append_dsd64(&cur_dsd, sg);
 291                 avail_dsds--;
 292         }
 293 }
 294 
 295 /*
 296  * Find the first handle that is not in use, starting from
 297  * req->current_outstanding_cmd + 1. The caller must hold the lock that is
 298  * associated with @req.
 299  */
 300 uint32_t qla2xxx_get_next_handle(struct req_que *req)
 301 {
 302         uint32_t index, handle = req->current_outstanding_cmd;
 303 
 304         for (index = 1; index < req->num_outstanding_cmds; index++) {
 305                 handle++;
 306                 if (handle == req->num_outstanding_cmds)
 307                         handle = 1;
 308                 if (!req->outstanding_cmds[handle])
 309                         return handle;
 310         }
 311 
 312         return 0;
 313 }
 314 
 315 /**
 316  * qla2x00_start_scsi() - Send a SCSI command to the ISP
 317  * @sp: command to send to the ISP
 318  *
 319  * Returns non-zero if a failure occurred, else zero.
 320  */
 321 int
 322 qla2x00_start_scsi(srb_t *sp)
 323 {
 324         int             nseg;
 325         unsigned long   flags;
 326         scsi_qla_host_t *vha;
 327         struct scsi_cmnd *cmd;
 328         uint32_t        *clr_ptr;
 329         uint32_t        handle;
 330         cmd_entry_t     *cmd_pkt;
 331         uint16_t        cnt;
 332         uint16_t        req_cnt;
 333         uint16_t        tot_dsds;
 334         struct device_reg_2xxx __iomem *reg;
 335         struct qla_hw_data *ha;
 336         struct req_que *req;
 337         struct rsp_que *rsp;
 338 
 339         /* Setup device pointers. */
 340         vha = sp->vha;
 341         ha = vha->hw;
 342         reg = &ha->iobase->isp;
 343         cmd = GET_CMD_SP(sp);
 344         req = ha->req_q_map[0];
 345         rsp = ha->rsp_q_map[0];
 346         /* So we know we haven't pci_map'ed anything yet */
 347         tot_dsds = 0;
 348 
 349         /* Send marker if required */
 350         if (vha->marker_needed != 0) {
 351                 if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
 352                     QLA_SUCCESS) {
 353                         return (QLA_FUNCTION_FAILED);
 354                 }
 355                 vha->marker_needed = 0;
 356         }
 357 
 358         /* Acquire ring specific lock */
 359         spin_lock_irqsave(&ha->hardware_lock, flags);
 360 
 361         handle = qla2xxx_get_next_handle(req);
 362         if (handle == 0)
 363                 goto queuing_error;
 364 
 365         /* Map the sg table so we have an accurate count of sg entries needed */
 366         if (scsi_sg_count(cmd)) {
 367                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
 368                     scsi_sg_count(cmd), cmd->sc_data_direction);
 369                 if (unlikely(!nseg))
 370                         goto queuing_error;
 371         } else
 372                 nseg = 0;
 373 
 374         tot_dsds = nseg;
 375 
 376         /* Calculate the number of request entries needed. */
 377         req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
 378         if (req->cnt < (req_cnt + 2)) {
 379                 cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
 380                 if (req->ring_index < cnt)
 381                         req->cnt = cnt - req->ring_index;
 382                 else
 383                         req->cnt = req->length -
 384                             (req->ring_index - cnt);
 385                 /* If still no head room then bail out */
 386                 if (req->cnt < (req_cnt + 2))
 387                         goto queuing_error;
 388         }
 389 
 390         /* Build command packet */
 391         req->current_outstanding_cmd = handle;
 392         req->outstanding_cmds[handle] = sp;
 393         sp->handle = handle;
 394         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
 395         req->cnt -= req_cnt;
 396 
 397         cmd_pkt = (cmd_entry_t *)req->ring_ptr;
 398         cmd_pkt->handle = handle;
 399         /* Zero out remaining portion of packet. */
 400         clr_ptr = (uint32_t *)cmd_pkt + 2;
 401         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
 402         cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
 403 
 404         /* Set target ID and LUN number*/
 405         SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
 406         cmd_pkt->lun = cpu_to_le16(cmd->device->lun);
 407         cmd_pkt->control_flags = cpu_to_le16(CF_SIMPLE_TAG);
 408 
 409         /* Load SCSI command packet. */
 410         memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
 411         cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
 412 
 413         /* Build IOCB segments */
 414         ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);
 415 
 416         /* Set total data segment count. */
 417         cmd_pkt->entry_count = (uint8_t)req_cnt;
 418         wmb();
 419 
 420         /* Adjust ring index. */
 421         req->ring_index++;
 422         if (req->ring_index == req->length) {
 423                 req->ring_index = 0;
 424                 req->ring_ptr = req->ring;
 425         } else
 426                 req->ring_ptr++;
 427 
 428         sp->flags |= SRB_DMA_VALID;
 429 
 430         /* Set chip new ring index. */
 431         WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
 432         RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));     /* PCI Posting. */
 433 
 434         /* Manage unprocessed RIO/ZIO commands in response queue. */
 435         if (vha->flags.process_response_queue &&
 436             rsp->ring_ptr->signature != RESPONSE_PROCESSED)
 437                 qla2x00_process_response_queue(rsp);
 438 
 439         spin_unlock_irqrestore(&ha->hardware_lock, flags);
 440         return (QLA_SUCCESS);
 441 
 442 queuing_error:
 443         if (tot_dsds)
 444                 scsi_dma_unmap(cmd);
 445 
 446         spin_unlock_irqrestore(&ha->hardware_lock, flags);
 447 
 448         return (QLA_FUNCTION_FAILED);
 449 }
 450 
 451 /**
 452  * qla2x00_start_iocbs() - Execute the IOCB command
 453  * @vha: HA context
 454  * @req: request queue
 455  */
 456 void
 457 qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
 458 {
 459         struct qla_hw_data *ha = vha->hw;
 460         device_reg_t *reg = ISP_QUE_REG(ha, req->id);
 461 
 462         if (IS_P3P_TYPE(ha)) {
 463                 qla82xx_start_iocbs(vha);
 464         } else {
 465                 /* Adjust ring index. */
 466                 req->ring_index++;
 467                 if (req->ring_index == req->length) {
 468                         req->ring_index = 0;
 469                         req->ring_ptr = req->ring;
 470                 } else
 471                         req->ring_ptr++;
 472 
 473                 /* Set chip new ring index. */
 474                 if (ha->mqenable || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
 475                         WRT_REG_DWORD(req->req_q_in, req->ring_index);
 476                 } else if (IS_QLA83XX(ha)) {
 477                         WRT_REG_DWORD(req->req_q_in, req->ring_index);
 478                         RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
 479                 } else if (IS_QLAFX00(ha)) {
 480                         WRT_REG_DWORD(&reg->ispfx00.req_q_in, req->ring_index);
 481                         RD_REG_DWORD_RELAXED(&reg->ispfx00.req_q_in);
 482                         QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
 483                 } else if (IS_FWI2_CAPABLE(ha)) {
 484                         WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
 485                         RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
 486                 } else {
 487                         WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
 488                                 req->ring_index);
 489                         RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
 490                 }
 491         }
 492 }
 493 
 494 /**
 495  * qla2x00_marker() - Send a marker IOCB to the firmware.
 496  * @vha: HA context
 497  * @qpair: queue pair pointer
 498  * @loop_id: loop ID
 499  * @lun: LUN
 500  * @type: marker modifier
 501  *
 502  * Can be called from both normal and interrupt context.
 503  *
 504  * Returns non-zero if a failure occurred, else zero.
 505  */
 506 static int
 507 __qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair,
 508     uint16_t loop_id, uint64_t lun, uint8_t type)
 509 {
 510         mrk_entry_t *mrk;
 511         struct mrk_entry_24xx *mrk24 = NULL;
 512         struct req_que *req = qpair->req;
 513         struct qla_hw_data *ha = vha->hw;
 514         scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
 515 
 516         mrk = (mrk_entry_t *)__qla2x00_alloc_iocbs(qpair, NULL);
 517         if (mrk == NULL) {
 518                 ql_log(ql_log_warn, base_vha, 0x3026,
 519                     "Failed to allocate Marker IOCB.\n");
 520 
 521                 return (QLA_FUNCTION_FAILED);
 522         }
 523 
 524         mrk->entry_type = MARKER_TYPE;
 525         mrk->modifier = type;
 526         if (type != MK_SYNC_ALL) {
 527                 if (IS_FWI2_CAPABLE(ha)) {
 528                         mrk24 = (struct mrk_entry_24xx *) mrk;
 529                         mrk24->nport_handle = cpu_to_le16(loop_id);
 530                         int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun);
 531                         host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
 532                         mrk24->vp_index = vha->vp_idx;
 533                         mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
 534                 } else {
 535                         SET_TARGET_ID(ha, mrk->target, loop_id);
 536                         mrk->lun = cpu_to_le16((uint16_t)lun);
 537                 }
 538         }
 539         wmb();
 540 
 541         qla2x00_start_iocbs(vha, req);
 542 
 543         return (QLA_SUCCESS);
 544 }
 545 
 546 int
 547 qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair,
 548     uint16_t loop_id, uint64_t lun, uint8_t type)
 549 {
 550         int ret;
 551         unsigned long flags = 0;
 552 
 553         spin_lock_irqsave(qpair->qp_lock_ptr, flags);
 554         ret = __qla2x00_marker(vha, qpair, loop_id, lun, type);
 555         spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
 556 
 557         return (ret);
 558 }
 559 
 560 /*
 561  * qla2x00_issue_marker
 562  *
 563  * Issue marker
 564  * Caller CAN have hardware lock held as specified by ha_locked parameter.
 565  * Might release it, then reaquire.
 566  */
 567 int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
 568 {
 569         if (ha_locked) {
 570                 if (__qla2x00_marker(vha, vha->hw->base_qpair, 0, 0,
 571                                         MK_SYNC_ALL) != QLA_SUCCESS)
 572                         return QLA_FUNCTION_FAILED;
 573         } else {
 574                 if (qla2x00_marker(vha, vha->hw->base_qpair, 0, 0,
 575                                         MK_SYNC_ALL) != QLA_SUCCESS)
 576                         return QLA_FUNCTION_FAILED;
 577         }
 578         vha->marker_needed = 0;
 579 
 580         return QLA_SUCCESS;
 581 }
 582 
 583 static inline int
 584 qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
 585         uint16_t tot_dsds)
 586 {
 587         struct dsd64 *cur_dsd = NULL, *next_dsd;
 588         scsi_qla_host_t *vha;
 589         struct qla_hw_data *ha;
 590         struct scsi_cmnd *cmd;
 591         struct  scatterlist *cur_seg;
 592         uint8_t avail_dsds;
 593         uint8_t first_iocb = 1;
 594         uint32_t dsd_list_len;
 595         struct dsd_dma *dsd_ptr;
 596         struct ct6_dsd *ctx;
 597 
 598         cmd = GET_CMD_SP(sp);
 599 
 600         /* Update entry type to indicate Command Type 3 IOCB */
 601         put_unaligned_le32(COMMAND_TYPE_6, &cmd_pkt->entry_type);
 602 
 603         /* No data transfer */
 604         if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
 605                 cmd_pkt->byte_count = cpu_to_le32(0);
 606                 return 0;
 607         }
 608 
 609         vha = sp->vha;
 610         ha = vha->hw;
 611 
 612         /* Set transfer direction */
 613         if (cmd->sc_data_direction == DMA_TO_DEVICE) {
 614                 cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
 615                 vha->qla_stats.output_bytes += scsi_bufflen(cmd);
 616                 vha->qla_stats.output_requests++;
 617         } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
 618                 cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
 619                 vha->qla_stats.input_bytes += scsi_bufflen(cmd);
 620                 vha->qla_stats.input_requests++;
 621         }
 622 
 623         cur_seg = scsi_sglist(cmd);
 624         ctx = sp->u.scmd.ct6_ctx;
 625 
 626         while (tot_dsds) {
 627                 avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
 628                     QLA_DSDS_PER_IOCB : tot_dsds;
 629                 tot_dsds -= avail_dsds;
 630                 dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;
 631 
 632                 dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
 633                     struct dsd_dma, list);
 634                 next_dsd = dsd_ptr->dsd_addr;
 635                 list_del(&dsd_ptr->list);
 636                 ha->gbl_dsd_avail--;
 637                 list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
 638                 ctx->dsd_use_cnt++;
 639                 ha->gbl_dsd_inuse++;
 640 
 641                 if (first_iocb) {
 642                         first_iocb = 0;
 643                         put_unaligned_le64(dsd_ptr->dsd_list_dma,
 644                                            &cmd_pkt->fcp_dsd.address);
 645                         cmd_pkt->fcp_dsd.length = cpu_to_le32(dsd_list_len);
 646                 } else {
 647                         put_unaligned_le64(dsd_ptr->dsd_list_dma,
 648                                            &cur_dsd->address);
 649                         cur_dsd->length = cpu_to_le32(dsd_list_len);
 650                         cur_dsd++;
 651                 }
 652                 cur_dsd = next_dsd;
 653                 while (avail_dsds) {
 654                         append_dsd64(&cur_dsd, cur_seg);
 655                         cur_seg = sg_next(cur_seg);
 656                         avail_dsds--;
 657                 }
 658         }
 659 
 660         /* Null termination */
 661         cur_dsd->address = 0;
 662         cur_dsd->length = 0;
 663         cur_dsd++;
 664         cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
 665         return 0;
 666 }
 667 
 668 /*
 669  * qla24xx_calc_dsd_lists() - Determine number of DSD list required
 670  * for Command Type 6.
 671  *
 672  * @dsds: number of data segment decriptors needed
 673  *
 674  * Returns the number of dsd list needed to store @dsds.
 675  */
 676 static inline uint16_t
 677 qla24xx_calc_dsd_lists(uint16_t dsds)
 678 {
 679         uint16_t dsd_lists = 0;
 680 
 681         dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
 682         if (dsds % QLA_DSDS_PER_IOCB)
 683                 dsd_lists++;
 684         return dsd_lists;
 685 }
 686 
 687 
 688 /**
 689  * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 690  * IOCB types.
 691  *
 692  * @sp: SRB command to process
 693  * @cmd_pkt: Command type 3 IOCB
 694  * @tot_dsds: Total number of segments to transfer
 695  * @req: pointer to request queue
 696  */
 697 inline void
 698 qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
 699         uint16_t tot_dsds, struct req_que *req)
 700 {
 701         uint16_t        avail_dsds;
 702         struct dsd64    *cur_dsd;
 703         scsi_qla_host_t *vha;
 704         struct scsi_cmnd *cmd;
 705         struct scatterlist *sg;
 706         int i;
 707 
 708         cmd = GET_CMD_SP(sp);
 709 
 710         /* Update entry type to indicate Command Type 3 IOCB */
 711         put_unaligned_le32(COMMAND_TYPE_7, &cmd_pkt->entry_type);
 712 
 713         /* No data transfer */
 714         if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
 715                 cmd_pkt->byte_count = cpu_to_le32(0);
 716                 return;
 717         }
 718 
 719         vha = sp->vha;
 720 
 721         /* Set transfer direction */
 722         if (cmd->sc_data_direction == DMA_TO_DEVICE) {
 723                 cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_WRITE_DATA);
 724                 vha->qla_stats.output_bytes += scsi_bufflen(cmd);
 725                 vha->qla_stats.output_requests++;
 726         } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
 727                 cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_READ_DATA);
 728                 vha->qla_stats.input_bytes += scsi_bufflen(cmd);
 729                 vha->qla_stats.input_requests++;
 730         }
 731 
 732         /* One DSD is available in the Command Type 3 IOCB */
 733         avail_dsds = 1;
 734         cur_dsd = &cmd_pkt->dsd;
 735 
 736         /* Load data segments */
 737 
 738         scsi_for_each_sg(cmd, sg, tot_dsds, i) {
 739                 cont_a64_entry_t *cont_pkt;
 740 
 741                 /* Allocate additional continuation packets? */
 742                 if (avail_dsds == 0) {
 743                         /*
 744                          * Five DSDs are available in the Continuation
 745                          * Type 1 IOCB.
 746                          */
 747                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
 748                         cur_dsd = cont_pkt->dsd;
 749                         avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
 750                 }
 751 
 752                 append_dsd64(&cur_dsd, sg);
 753                 avail_dsds--;
 754         }
 755 }
 756 
 757 struct fw_dif_context {
 758         uint32_t ref_tag;
 759         uint16_t app_tag;
 760         uint8_t ref_tag_mask[4];        /* Validation/Replacement Mask*/
 761         uint8_t app_tag_mask[2];        /* Validation/Replacement Mask*/
 762 };
 763 
 764 /*
 765  * qla24xx_set_t10dif_tags_from_cmd - Extract Ref and App tags from SCSI command
 766  *
 767  */
 768 static inline void
 769 qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
 770     unsigned int protcnt)
 771 {
 772         struct scsi_cmnd *cmd = GET_CMD_SP(sp);
 773 
 774         switch (scsi_get_prot_type(cmd)) {
 775         case SCSI_PROT_DIF_TYPE0:
 776                 /*
 777                  * No check for ql2xenablehba_err_chk, as it would be an
 778                  * I/O error if hba tag generation is not done.
 779                  */
 780                 pkt->ref_tag = cpu_to_le32((uint32_t)
 781                     (0xffffffff & scsi_get_lba(cmd)));
 782 
 783                 if (!qla2x00_hba_err_chk_enabled(sp))
 784                         break;
 785 
 786                 pkt->ref_tag_mask[0] = 0xff;
 787                 pkt->ref_tag_mask[1] = 0xff;
 788                 pkt->ref_tag_mask[2] = 0xff;
 789                 pkt->ref_tag_mask[3] = 0xff;
 790                 break;
 791 
 792         /*
 793          * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
 794          * match LBA in CDB + N
 795          */
 796         case SCSI_PROT_DIF_TYPE2:
 797                 pkt->app_tag = cpu_to_le16(0);
 798                 pkt->app_tag_mask[0] = 0x0;
 799                 pkt->app_tag_mask[1] = 0x0;
 800 
 801                 pkt->ref_tag = cpu_to_le32((uint32_t)
 802                     (0xffffffff & scsi_get_lba(cmd)));
 803 
 804                 if (!qla2x00_hba_err_chk_enabled(sp))
 805                         break;
 806 
 807                 /* enable ALL bytes of the ref tag */
 808                 pkt->ref_tag_mask[0] = 0xff;
 809                 pkt->ref_tag_mask[1] = 0xff;
 810                 pkt->ref_tag_mask[2] = 0xff;
 811                 pkt->ref_tag_mask[3] = 0xff;
 812                 break;
 813 
 814         /* For Type 3 protection: 16 bit GUARD only */
 815         case SCSI_PROT_DIF_TYPE3:
 816                 pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
 817                         pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
 818                                                                 0x00;
 819                 break;
 820 
 821         /*
 822          * For TYpe 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
 823          * 16 bit app tag.
 824          */
 825         case SCSI_PROT_DIF_TYPE1:
 826                 pkt->ref_tag = cpu_to_le32((uint32_t)
 827                     (0xffffffff & scsi_get_lba(cmd)));
 828                 pkt->app_tag = cpu_to_le16(0);
 829                 pkt->app_tag_mask[0] = 0x0;
 830                 pkt->app_tag_mask[1] = 0x0;
 831 
 832                 if (!qla2x00_hba_err_chk_enabled(sp))
 833                         break;
 834 
 835                 /* enable ALL bytes of the ref tag */
 836                 pkt->ref_tag_mask[0] = 0xff;
 837                 pkt->ref_tag_mask[1] = 0xff;
 838                 pkt->ref_tag_mask[2] = 0xff;
 839                 pkt->ref_tag_mask[3] = 0xff;
 840                 break;
 841         }
 842 }
 843 
 844 int
 845 qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
 846         uint32_t *partial)
 847 {
 848         struct scatterlist *sg;
 849         uint32_t cumulative_partial, sg_len;
 850         dma_addr_t sg_dma_addr;
 851 
 852         if (sgx->num_bytes == sgx->tot_bytes)
 853                 return 0;
 854 
 855         sg = sgx->cur_sg;
 856         cumulative_partial = sgx->tot_partial;
 857 
 858         sg_dma_addr = sg_dma_address(sg);
 859         sg_len = sg_dma_len(sg);
 860 
 861         sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;
 862 
 863         if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
 864                 sgx->dma_len = (blk_sz - cumulative_partial);
 865                 sgx->tot_partial = 0;
 866                 sgx->num_bytes += blk_sz;
 867                 *partial = 0;
 868         } else {
 869                 sgx->dma_len = sg_len - sgx->bytes_consumed;
 870                 sgx->tot_partial += sgx->dma_len;
 871                 *partial = 1;
 872         }
 873 
 874         sgx->bytes_consumed += sgx->dma_len;
 875 
 876         if (sg_len == sgx->bytes_consumed) {
 877                 sg = sg_next(sg);
 878                 sgx->num_sg++;
 879                 sgx->cur_sg = sg;
 880                 sgx->bytes_consumed = 0;
 881         }
 882 
 883         return 1;
 884 }
 885 
 886 int
 887 qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
 888         struct dsd64 *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
 889 {
 890         void *next_dsd;
 891         uint8_t avail_dsds = 0;
 892         uint32_t dsd_list_len;
 893         struct dsd_dma *dsd_ptr;
 894         struct scatterlist *sg_prot;
 895         struct dsd64 *cur_dsd = dsd;
 896         uint16_t        used_dsds = tot_dsds;
 897         uint32_t        prot_int; /* protection interval */
 898         uint32_t        partial;
 899         struct qla2_sgx sgx;
 900         dma_addr_t      sle_dma;
 901         uint32_t        sle_dma_len, tot_prot_dma_len = 0;
 902         struct scsi_cmnd *cmd;
 903 
 904         memset(&sgx, 0, sizeof(struct qla2_sgx));
 905         if (sp) {
 906                 cmd = GET_CMD_SP(sp);
 907                 prot_int = cmd->device->sector_size;
 908 
 909                 sgx.tot_bytes = scsi_bufflen(cmd);
 910                 sgx.cur_sg = scsi_sglist(cmd);
 911                 sgx.sp = sp;
 912 
 913                 sg_prot = scsi_prot_sglist(cmd);
 914         } else if (tc) {
 915                 prot_int      = tc->blk_sz;
 916                 sgx.tot_bytes = tc->bufflen;
 917                 sgx.cur_sg    = tc->sg;
 918                 sg_prot       = tc->prot_sg;
 919         } else {
 920                 BUG();
 921                 return 1;
 922         }
 923 
 924         while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {
 925 
 926                 sle_dma = sgx.dma_addr;
 927                 sle_dma_len = sgx.dma_len;
 928 alloc_and_fill:
 929                 /* Allocate additional continuation packets? */
 930                 if (avail_dsds == 0) {
 931                         avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
 932                                         QLA_DSDS_PER_IOCB : used_dsds;
 933                         dsd_list_len = (avail_dsds + 1) * 12;
 934                         used_dsds -= avail_dsds;
 935 
 936                         /* allocate tracking DS */
 937                         dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
 938                         if (!dsd_ptr)
 939                                 return 1;
 940 
 941                         /* allocate new list */
 942                         dsd_ptr->dsd_addr = next_dsd =
 943                             dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
 944                                 &dsd_ptr->dsd_list_dma);
 945 
 946                         if (!next_dsd) {
 947                                 /*
 948                                  * Need to cleanup only this dsd_ptr, rest
 949                                  * will be done by sp_free_dma()
 950                                  */
 951                                 kfree(dsd_ptr);
 952                                 return 1;
 953                         }
 954 
 955                         if (sp) {
 956                                 list_add_tail(&dsd_ptr->list,
 957                                               &sp->u.scmd.crc_ctx->dsd_list);
 958 
 959                                 sp->flags |= SRB_CRC_CTX_DSD_VALID;
 960                         } else {
 961                                 list_add_tail(&dsd_ptr->list,
 962                                     &(tc->ctx->dsd_list));
 963                                 *tc->ctx_dsd_alloced = 1;
 964                         }
 965 
 966 
 967                         /* add new list to cmd iocb or last list */
 968                         put_unaligned_le64(dsd_ptr->dsd_list_dma,
 969                                            &cur_dsd->address);
 970                         cur_dsd->length = cpu_to_le32(dsd_list_len);
 971                         cur_dsd = next_dsd;
 972                 }
 973                 put_unaligned_le64(sle_dma, &cur_dsd->address);
 974                 cur_dsd->length = cpu_to_le32(sle_dma_len);
 975                 cur_dsd++;
 976                 avail_dsds--;
 977 
 978                 if (partial == 0) {
 979                         /* Got a full protection interval */
 980                         sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
 981                         sle_dma_len = 8;
 982 
 983                         tot_prot_dma_len += sle_dma_len;
 984                         if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
 985                                 tot_prot_dma_len = 0;
 986                                 sg_prot = sg_next(sg_prot);
 987                         }
 988 
 989                         partial = 1; /* So as to not re-enter this block */
 990                         goto alloc_and_fill;
 991                 }
 992         }
 993         /* Null termination */
 994         cur_dsd->address = 0;
 995         cur_dsd->length = 0;
 996         cur_dsd++;
 997         return 0;
 998 }
 999 
1000 int
1001 qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp,
1002         struct dsd64 *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
1003 {
1004         void *next_dsd;
1005         uint8_t avail_dsds = 0;
1006         uint32_t dsd_list_len;
1007         struct dsd_dma *dsd_ptr;
1008         struct scatterlist *sg, *sgl;
1009         struct dsd64 *cur_dsd = dsd;
1010         int     i;
1011         uint16_t        used_dsds = tot_dsds;
1012         struct scsi_cmnd *cmd;
1013 
1014         if (sp) {
1015                 cmd = GET_CMD_SP(sp);
1016                 sgl = scsi_sglist(cmd);
1017         } else if (tc) {
1018                 sgl = tc->sg;
1019         } else {
1020                 BUG();
1021                 return 1;
1022         }
1023 
1024 
1025         for_each_sg(sgl, sg, tot_dsds, i) {
1026                 /* Allocate additional continuation packets? */
1027                 if (avail_dsds == 0) {
1028                         avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
1029                                         QLA_DSDS_PER_IOCB : used_dsds;
1030                         dsd_list_len = (avail_dsds + 1) * 12;
1031                         used_dsds -= avail_dsds;
1032 
1033                         /* allocate tracking DS */
1034                         dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
1035                         if (!dsd_ptr)
1036                                 return 1;
1037 
1038                         /* allocate new list */
1039                         dsd_ptr->dsd_addr = next_dsd =
1040                             dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
1041                                 &dsd_ptr->dsd_list_dma);
1042 
1043                         if (!next_dsd) {
1044                                 /*
1045                                  * Need to cleanup only this dsd_ptr, rest
1046                                  * will be done by sp_free_dma()
1047                                  */
1048                                 kfree(dsd_ptr);
1049                                 return 1;
1050                         }
1051 
1052                         if (sp) {
1053                                 list_add_tail(&dsd_ptr->list,
1054                                               &sp->u.scmd.crc_ctx->dsd_list);
1055 
1056                                 sp->flags |= SRB_CRC_CTX_DSD_VALID;
1057                         } else {
1058                                 list_add_tail(&dsd_ptr->list,
1059                                     &(tc->ctx->dsd_list));
1060                                 *tc->ctx_dsd_alloced = 1;
1061                         }
1062 
1063                         /* add new list to cmd iocb or last list */
1064                         put_unaligned_le64(dsd_ptr->dsd_list_dma,
1065                                            &cur_dsd->address);
1066                         cur_dsd->length = cpu_to_le32(dsd_list_len);
1067                         cur_dsd = next_dsd;
1068                 }
1069                 append_dsd64(&cur_dsd, sg);
1070                 avail_dsds--;
1071 
1072         }
1073         /* Null termination */
1074         cur_dsd->address = 0;
1075         cur_dsd->length = 0;
1076         cur_dsd++;
1077         return 0;
1078 }
1079 
1080 int
1081 qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
1082         struct dsd64 *cur_dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
1083 {
1084         struct dsd_dma *dsd_ptr = NULL, *dif_dsd, *nxt_dsd;
1085         struct scatterlist *sg, *sgl;
1086         struct crc_context *difctx = NULL;
1087         struct scsi_qla_host *vha;
1088         uint dsd_list_len;
1089         uint avail_dsds = 0;
1090         uint used_dsds = tot_dsds;
1091         bool dif_local_dma_alloc = false;
1092         bool direction_to_device = false;
1093         int i;
1094 
1095         if (sp) {
1096                 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1097 
1098                 sgl = scsi_prot_sglist(cmd);
1099                 vha = sp->vha;
1100                 difctx = sp->u.scmd.crc_ctx;
1101                 direction_to_device = cmd->sc_data_direction == DMA_TO_DEVICE;
1102                 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021,
1103                   "%s: scsi_cmnd: %p, crc_ctx: %p, sp: %p\n",
1104                         __func__, cmd, difctx, sp);
1105         } else if (tc) {
1106                 vha = tc->vha;
1107                 sgl = tc->prot_sg;
1108                 difctx = tc->ctx;
1109                 direction_to_device = tc->dma_data_direction == DMA_TO_DEVICE;
1110         } else {
1111                 BUG();
1112                 return 1;
1113         }
1114 
1115         ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021,
1116             "%s: enter (write=%u)\n", __func__, direction_to_device);
1117 
1118         /* if initiator doing write or target doing read */
1119         if (direction_to_device) {
1120                 for_each_sg(sgl, sg, tot_dsds, i) {
1121                         u64 sle_phys = sg_phys(sg);
1122 
1123                         /* If SGE addr + len flips bits in upper 32-bits */
1124                         if (MSD(sle_phys + sg->length) ^ MSD(sle_phys)) {
1125                                 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe022,
1126                                     "%s: page boundary crossing (phys=%llx len=%x)\n",
1127                                     __func__, sle_phys, sg->length);
1128 
1129                                 if (difctx) {
1130                                         ha->dif_bundle_crossed_pages++;
1131                                         dif_local_dma_alloc = true;
1132                                 } else {
1133                                         ql_dbg(ql_dbg_tgt + ql_dbg_verbose,
1134                                             vha, 0xe022,
1135                                             "%s: difctx pointer is NULL\n",
1136                                             __func__);
1137                                 }
1138                                 break;
1139                         }
1140                 }
1141                 ha->dif_bundle_writes++;
1142         } else {
1143                 ha->dif_bundle_reads++;
1144         }
1145 
1146         if (ql2xdifbundlinginternalbuffers)
1147                 dif_local_dma_alloc = direction_to_device;
1148 
1149         if (dif_local_dma_alloc) {
1150                 u32 track_difbundl_buf = 0;
1151                 u32 ldma_sg_len = 0;
1152                 u8 ldma_needed = 1;
1153 
1154                 difctx->no_dif_bundl = 0;
1155                 difctx->dif_bundl_len = 0;
1156 
1157                 /* Track DSD buffers */
1158                 INIT_LIST_HEAD(&difctx->ldif_dsd_list);
1159                 /* Track local DMA buffers */
1160                 INIT_LIST_HEAD(&difctx->ldif_dma_hndl_list);
1161 
1162                 for_each_sg(sgl, sg, tot_dsds, i) {
1163                         u32 sglen = sg_dma_len(sg);
1164 
1165                         ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe023,
1166                             "%s: sg[%x] (phys=%llx sglen=%x) ldma_sg_len: %x dif_bundl_len: %x ldma_needed: %x\n",
1167                             __func__, i, (u64)sg_phys(sg), sglen, ldma_sg_len,
1168                             difctx->dif_bundl_len, ldma_needed);
1169 
1170                         while (sglen) {
1171                                 u32 xfrlen = 0;
1172 
1173                                 if (ldma_needed) {
1174                                         /*
1175                                          * Allocate list item to store
1176                                          * the DMA buffers
1177                                          */
1178                                         dsd_ptr = kzalloc(sizeof(*dsd_ptr),
1179                                             GFP_ATOMIC);
1180                                         if (!dsd_ptr) {
1181                                                 ql_dbg(ql_dbg_tgt, vha, 0xe024,
1182                                                     "%s: failed alloc dsd_ptr\n",
1183                                                     __func__);
1184                                                 return 1;
1185                                         }
1186                                         ha->dif_bundle_kallocs++;
1187 
1188                                         /* allocate dma buffer */
1189                                         dsd_ptr->dsd_addr = dma_pool_alloc
1190                                                 (ha->dif_bundl_pool, GFP_ATOMIC,
1191                                                  &dsd_ptr->dsd_list_dma);
1192                                         if (!dsd_ptr->dsd_addr) {
1193                                                 ql_dbg(ql_dbg_tgt, vha, 0xe024,
1194                                                     "%s: failed alloc ->dsd_ptr\n",
1195                                                     __func__);
1196                                                 /*
1197                                                  * need to cleanup only this
1198                                                  * dsd_ptr rest will be done
1199                                                  * by sp_free_dma()
1200                                                  */
1201                                                 kfree(dsd_ptr);
1202                                                 ha->dif_bundle_kallocs--;
1203                                                 return 1;
1204                                         }
1205                                         ha->dif_bundle_dma_allocs++;
1206                                         ldma_needed = 0;
1207                                         difctx->no_dif_bundl++;
1208                                         list_add_tail(&dsd_ptr->list,
1209                                             &difctx->ldif_dma_hndl_list);
1210                                 }
1211 
1212                                 /* xfrlen is min of dma pool size and sglen */
1213                                 xfrlen = (sglen >
1214                                    (DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len)) ?
1215                                     DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len :
1216                                     sglen;
1217 
1218                                 /* replace with local allocated dma buffer */
1219                                 sg_pcopy_to_buffer(sgl, sg_nents(sgl),
1220                                     dsd_ptr->dsd_addr + ldma_sg_len, xfrlen,
1221                                     difctx->dif_bundl_len);
1222                                 difctx->dif_bundl_len += xfrlen;
1223                                 sglen -= xfrlen;
1224                                 ldma_sg_len += xfrlen;
1225                                 if (ldma_sg_len == DIF_BUNDLING_DMA_POOL_SIZE ||
1226                                     sg_is_last(sg)) {
1227                                         ldma_needed = 1;
1228                                         ldma_sg_len = 0;
1229                                 }
1230                         }
1231                 }
1232 
1233                 track_difbundl_buf = used_dsds = difctx->no_dif_bundl;
1234                 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe025,
1235                     "dif_bundl_len=%x, no_dif_bundl=%x track_difbundl_buf: %x\n",
1236                     difctx->dif_bundl_len, difctx->no_dif_bundl,
1237                     track_difbundl_buf);
1238 
1239                 if (sp)
1240                         sp->flags |= SRB_DIF_BUNDL_DMA_VALID;
1241                 else
1242                         tc->prot_flags = DIF_BUNDL_DMA_VALID;
1243 
1244                 list_for_each_entry_safe(dif_dsd, nxt_dsd,
1245                     &difctx->ldif_dma_hndl_list, list) {
1246                         u32 sglen = (difctx->dif_bundl_len >
1247                             DIF_BUNDLING_DMA_POOL_SIZE) ?
1248                             DIF_BUNDLING_DMA_POOL_SIZE : difctx->dif_bundl_len;
1249 
1250                         BUG_ON(track_difbundl_buf == 0);
1251 
1252                         /* Allocate additional continuation packets? */
1253                         if (avail_dsds == 0) {
1254                                 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha,
1255                                     0xe024,
1256                                     "%s: adding continuation iocb's\n",
1257                                     __func__);
1258                                 avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
1259                                     QLA_DSDS_PER_IOCB : used_dsds;
1260                                 dsd_list_len = (avail_dsds + 1) * 12;
1261                                 used_dsds -= avail_dsds;
1262 
1263                                 /* allocate tracking DS */
1264                                 dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC);
1265                                 if (!dsd_ptr) {
1266                                         ql_dbg(ql_dbg_tgt, vha, 0xe026,
1267                                             "%s: failed alloc dsd_ptr\n",
1268                                             __func__);
1269                                         return 1;
1270                                 }
1271                                 ha->dif_bundle_kallocs++;
1272 
1273                                 difctx->no_ldif_dsd++;
1274                                 /* allocate new list */
1275                                 dsd_ptr->dsd_addr =
1276                                     dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
1277                                         &dsd_ptr->dsd_list_dma);
1278                                 if (!dsd_ptr->dsd_addr) {
1279                                         ql_dbg(ql_dbg_tgt, vha, 0xe026,
1280                                             "%s: failed alloc ->dsd_addr\n",
1281                                             __func__);
1282                                         /*
1283                                          * need to cleanup only this dsd_ptr
1284                                          *  rest will be done by sp_free_dma()
1285                                          */
1286                                         kfree(dsd_ptr);
1287                                         ha->dif_bundle_kallocs--;
1288                                         return 1;
1289                                 }
1290                                 ha->dif_bundle_dma_allocs++;
1291 
1292                                 if (sp) {
1293                                         list_add_tail(&dsd_ptr->list,
1294                                             &difctx->ldif_dsd_list);
1295                                         sp->flags |= SRB_CRC_CTX_DSD_VALID;
1296                                 } else {
1297                                         list_add_tail(&dsd_ptr->list,
1298                                             &difctx->ldif_dsd_list);
1299                                         tc->ctx_dsd_alloced = 1;
1300                                 }
1301 
1302                                 /* add new list to cmd iocb or last list */
1303                                 put_unaligned_le64(dsd_ptr->dsd_list_dma,
1304                                                    &cur_dsd->address);
1305                                 cur_dsd->length = cpu_to_le32(dsd_list_len);
1306                                 cur_dsd = dsd_ptr->dsd_addr;
1307                         }
1308                         put_unaligned_le64(dif_dsd->dsd_list_dma,
1309                                            &cur_dsd->address);
1310                         cur_dsd->length = cpu_to_le32(sglen);
1311                         cur_dsd++;
1312                         avail_dsds--;
1313                         difctx->dif_bundl_len -= sglen;
1314                         track_difbundl_buf--;
1315                 }
1316 
1317                 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe026,
1318                     "%s: no_ldif_dsd:%x, no_dif_bundl:%x\n", __func__,
1319                         difctx->no_ldif_dsd, difctx->no_dif_bundl);
1320         } else {
1321                 for_each_sg(sgl, sg, tot_dsds, i) {
1322                         /* Allocate additional continuation packets? */
1323                         if (avail_dsds == 0) {
1324                                 avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
1325                                     QLA_DSDS_PER_IOCB : used_dsds;
1326                                 dsd_list_len = (avail_dsds + 1) * 12;
1327                                 used_dsds -= avail_dsds;
1328 
1329                                 /* allocate tracking DS */
1330                                 dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC);
1331                                 if (!dsd_ptr) {
1332                                         ql_dbg(ql_dbg_tgt + ql_dbg_verbose,
1333                                             vha, 0xe027,
1334                                             "%s: failed alloc dsd_dma...\n",
1335                                             __func__);
1336                                         return 1;
1337                                 }
1338 
1339                                 /* allocate new list */
1340                                 dsd_ptr->dsd_addr =
1341                                     dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
1342                                         &dsd_ptr->dsd_list_dma);
1343                                 if (!dsd_ptr->dsd_addr) {
1344                                         /* need to cleanup only this dsd_ptr */
1345                                         /* rest will be done by sp_free_dma() */
1346                                         kfree(dsd_ptr);
1347                                         return 1;
1348                                 }
1349 
1350                                 if (sp) {
1351                                         list_add_tail(&dsd_ptr->list,
1352                                             &difctx->dsd_list);
1353                                         sp->flags |= SRB_CRC_CTX_DSD_VALID;
1354                                 } else {
1355                                         list_add_tail(&dsd_ptr->list,
1356                                             &difctx->dsd_list);
1357                                         tc->ctx_dsd_alloced = 1;
1358                                 }
1359 
1360                                 /* add new list to cmd iocb or last list */
1361                                 put_unaligned_le64(dsd_ptr->dsd_list_dma,
1362                                                    &cur_dsd->address);
1363                                 cur_dsd->length = cpu_to_le32(dsd_list_len);
1364                                 cur_dsd = dsd_ptr->dsd_addr;
1365                         }
1366                         append_dsd64(&cur_dsd, sg);
1367                         avail_dsds--;
1368                 }
1369         }
1370         /* Null termination */
1371         cur_dsd->address = 0;
1372         cur_dsd->length = 0;
1373         cur_dsd++;
1374         return 0;
1375 }
1376 
1377 /**
1378  * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
1379  *                                                      Type 6 IOCB types.
1380  *
1381  * @sp: SRB command to process
1382  * @cmd_pkt: Command type 3 IOCB
1383  * @tot_dsds: Total number of segments to transfer
1384  * @tot_prot_dsds: Total number of segments with protection information
1385  * @fw_prot_opts: Protection options to be passed to firmware
1386  */
1387 static inline int
1388 qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1389     uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
1390 {
1391         struct dsd64            *cur_dsd;
1392         uint32_t                *fcp_dl;
1393         scsi_qla_host_t         *vha;
1394         struct scsi_cmnd        *cmd;
1395         uint32_t                total_bytes = 0;
1396         uint32_t                data_bytes;
1397         uint32_t                dif_bytes;
1398         uint8_t                 bundling = 1;
1399         uint16_t                blk_size;
1400         struct crc_context      *crc_ctx_pkt = NULL;
1401         struct qla_hw_data      *ha;
1402         uint8_t                 additional_fcpcdb_len;
1403         uint16_t                fcp_cmnd_len;
1404         struct fcp_cmnd         *fcp_cmnd;
1405         dma_addr_t              crc_ctx_dma;
1406 
1407         cmd = GET_CMD_SP(sp);
1408 
1409         /* Update entry type to indicate Command Type CRC_2 IOCB */
1410         put_unaligned_le32(COMMAND_TYPE_CRC_2, &cmd_pkt->entry_type);
1411 
1412         vha = sp->vha;
1413         ha = vha->hw;
1414 
1415         /* No data transfer */
1416         data_bytes = scsi_bufflen(cmd);
1417         if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
1418                 cmd_pkt->byte_count = cpu_to_le32(0);
1419                 return QLA_SUCCESS;
1420         }
1421 
1422         cmd_pkt->vp_index = sp->vha->vp_idx;
1423 
1424         /* Set transfer direction */
1425         if (cmd->sc_data_direction == DMA_TO_DEVICE) {
1426                 cmd_pkt->control_flags =
1427                     cpu_to_le16(CF_WRITE_DATA);
1428         } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
1429                 cmd_pkt->control_flags =
1430                     cpu_to_le16(CF_READ_DATA);
1431         }
1432 
1433         if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1434             (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
1435             (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
1436             (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
1437                 bundling = 0;
1438 
1439         /* Allocate CRC context from global pool */
1440         crc_ctx_pkt = sp->u.scmd.crc_ctx =
1441             dma_pool_zalloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
1442 
1443         if (!crc_ctx_pkt)
1444                 goto crc_queuing_error;
1445 
1446         crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
1447 
1448         sp->flags |= SRB_CRC_CTX_DMA_VALID;
1449 
1450         /* Set handle */
1451         crc_ctx_pkt->handle = cmd_pkt->handle;
1452 
1453         INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);
1454 
1455         qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
1456             &crc_ctx_pkt->ref_tag, tot_prot_dsds);
1457 
1458         put_unaligned_le64(crc_ctx_dma, &cmd_pkt->crc_context_address);
1459         cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
1460 
1461         /* Determine SCSI command length -- align to 4 byte boundary */
1462         if (cmd->cmd_len > 16) {
1463                 additional_fcpcdb_len = cmd->cmd_len - 16;
1464                 if ((cmd->cmd_len % 4) != 0) {
1465                         /* SCSI cmd > 16 bytes must be multiple of 4 */
1466                         goto crc_queuing_error;
1467                 }
1468                 fcp_cmnd_len = 12 + cmd->cmd_len + 4;
1469         } else {
1470                 additional_fcpcdb_len = 0;
1471                 fcp_cmnd_len = 12 + 16 + 4;
1472         }
1473 
1474         fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;
1475 
1476         fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
1477         if (cmd->sc_data_direction == DMA_TO_DEVICE)
1478                 fcp_cmnd->additional_cdb_len |= 1;
1479         else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
1480                 fcp_cmnd->additional_cdb_len |= 2;
1481 
1482         int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
1483         memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
1484         cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
1485         put_unaligned_le64(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF,
1486                            &cmd_pkt->fcp_cmnd_dseg_address);
1487         fcp_cmnd->task_management = 0;
1488         fcp_cmnd->task_attribute = TSK_SIMPLE;
1489 
1490         cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */
1491 
1492         /* Compute dif len and adjust data len to incude protection */
1493         dif_bytes = 0;
1494         blk_size = cmd->device->sector_size;
1495         dif_bytes = (data_bytes / blk_size) * 8;
1496 
1497         switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
1498         case SCSI_PROT_READ_INSERT:
1499         case SCSI_PROT_WRITE_STRIP:
1500                 total_bytes = data_bytes;
1501                 data_bytes += dif_bytes;
1502                 break;
1503 
1504         case SCSI_PROT_READ_STRIP:
1505         case SCSI_PROT_WRITE_INSERT:
1506         case SCSI_PROT_READ_PASS:
1507         case SCSI_PROT_WRITE_PASS:
1508                 total_bytes = data_bytes + dif_bytes;
1509                 break;
1510         default:
1511                 BUG();
1512         }
1513 
1514         if (!qla2x00_hba_err_chk_enabled(sp))
1515                 fw_prot_opts |= 0x10; /* Disable Guard tag checking */
1516         /* HBA error checking enabled */
1517         else if (IS_PI_UNINIT_CAPABLE(ha)) {
1518                 if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
1519                     || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
1520                         SCSI_PROT_DIF_TYPE2))
1521                         fw_prot_opts |= BIT_10;
1522                 else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
1523                     SCSI_PROT_DIF_TYPE3)
1524                         fw_prot_opts |= BIT_11;
1525         }
1526 
1527         if (!bundling) {
1528                 cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd[0];
1529         } else {
1530                 /*
1531                  * Configure Bundling if we need to fetch interlaving
1532                  * protection PCI accesses
1533                  */
1534                 fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
1535                 crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
1536                 crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
1537                                                         tot_prot_dsds);
1538                 cur_dsd = &crc_ctx_pkt->u.bundling.data_dsd[0];
1539         }
1540 
1541         /* Finish the common fields of CRC pkt */
1542         crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
1543         crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
1544         crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
1545         crc_ctx_pkt->guard_seed = cpu_to_le16(0);
1546         /* Fibre channel byte count */
1547         cmd_pkt->byte_count = cpu_to_le32(total_bytes);
1548         fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
1549             additional_fcpcdb_len);
1550         *fcp_dl = htonl(total_bytes);
1551 
1552         if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
1553                 cmd_pkt->byte_count = cpu_to_le32(0);
1554                 return QLA_SUCCESS;
1555         }
1556         /* Walks data segments */
1557 
1558         cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
1559 
1560         if (!bundling && tot_prot_dsds) {
1561                 if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
1562                         cur_dsd, tot_dsds, NULL))
1563                         goto crc_queuing_error;
1564         } else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
1565                         (tot_dsds - tot_prot_dsds), NULL))
1566                 goto crc_queuing_error;
1567 
1568         if (bundling && tot_prot_dsds) {
1569                 /* Walks dif segments */
1570                 cmd_pkt->control_flags |= cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
1571                 cur_dsd = &crc_ctx_pkt->u.bundling.dif_dsd;
1572                 if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
1573                                 tot_prot_dsds, NULL))
1574                         goto crc_queuing_error;
1575         }
1576         return QLA_SUCCESS;
1577 
1578 crc_queuing_error:
1579         /* Cleanup will be performed by the caller */
1580 
1581         return QLA_FUNCTION_FAILED;
1582 }
1583 
1584 /**
1585  * qla24xx_start_scsi() - Send a SCSI command to the ISP
1586  * @sp: command to send to the ISP
1587  *
1588  * Returns non-zero if a failure occurred, else zero.
1589  */
1590 int
1591 qla24xx_start_scsi(srb_t *sp)
1592 {
1593         int             nseg;
1594         unsigned long   flags;
1595         uint32_t        *clr_ptr;
1596         uint32_t        handle;
1597         struct cmd_type_7 *cmd_pkt;
1598         uint16_t        cnt;
1599         uint16_t        req_cnt;
1600         uint16_t        tot_dsds;
1601         struct req_que *req = NULL;
1602         struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1603         struct scsi_qla_host *vha = sp->vha;
1604         struct qla_hw_data *ha = vha->hw;
1605 
1606         /* Setup device pointers. */
1607         req = vha->req;
1608 
1609         /* So we know we haven't pci_map'ed anything yet */
1610         tot_dsds = 0;
1611 
1612         /* Send marker if required */
1613         if (vha->marker_needed != 0) {
1614                 if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
1615                     QLA_SUCCESS)
1616                         return QLA_FUNCTION_FAILED;
1617                 vha->marker_needed = 0;
1618         }
1619 
1620         /* Acquire ring specific lock */
1621         spin_lock_irqsave(&ha->hardware_lock, flags);
1622 
1623         handle = qla2xxx_get_next_handle(req);
1624         if (handle == 0)
1625                 goto queuing_error;
1626 
1627         /* Map the sg table so we have an accurate count of sg entries needed */
1628         if (scsi_sg_count(cmd)) {
1629                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1630                     scsi_sg_count(cmd), cmd->sc_data_direction);
1631                 if (unlikely(!nseg))
1632                         goto queuing_error;
1633         } else
1634                 nseg = 0;
1635 
1636         tot_dsds = nseg;
1637         req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1638         if (req->cnt < (req_cnt + 2)) {
1639                 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1640                     RD_REG_DWORD_RELAXED(req->req_q_out);
1641                 if (req->ring_index < cnt)
1642                         req->cnt = cnt - req->ring_index;
1643                 else
1644                         req->cnt = req->length -
1645                                 (req->ring_index - cnt);
1646                 if (req->cnt < (req_cnt + 2))
1647                         goto queuing_error;
1648         }
1649 
1650         /* Build command packet. */
1651         req->current_outstanding_cmd = handle;
1652         req->outstanding_cmds[handle] = sp;
1653         sp->handle = handle;
1654         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1655         req->cnt -= req_cnt;
1656 
1657         cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
1658         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1659 
1660         /* Zero out remaining portion of packet. */
1661         /*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
1662         clr_ptr = (uint32_t *)cmd_pkt + 2;
1663         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1664         cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1665 
1666         /* Set NPORT-ID and LUN number*/
1667         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1668         cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1669         cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1670         cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1671         cmd_pkt->vp_index = sp->vha->vp_idx;
1672 
1673         int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1674         host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1675 
1676         cmd_pkt->task = TSK_SIMPLE;
1677 
1678         /* Load SCSI command packet. */
1679         memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
1680         host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
1681 
1682         cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
1683 
1684         /* Build IOCB segments */
1685         qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
1686 
1687         /* Set total data segment count. */
1688         cmd_pkt->entry_count = (uint8_t)req_cnt;
1689         wmb();
1690         /* Adjust ring index. */
1691         req->ring_index++;
1692         if (req->ring_index == req->length) {
1693                 req->ring_index = 0;
1694                 req->ring_ptr = req->ring;
1695         } else
1696                 req->ring_ptr++;
1697 
1698         sp->flags |= SRB_DMA_VALID;
1699 
1700         /* Set chip new ring index. */
1701         WRT_REG_DWORD(req->req_q_in, req->ring_index);
1702 
1703         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1704         return QLA_SUCCESS;
1705 
1706 queuing_error:
1707         if (tot_dsds)
1708                 scsi_dma_unmap(cmd);
1709 
1710         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1711 
1712         return QLA_FUNCTION_FAILED;
1713 }
1714 
1715 /**
1716  * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
1717  * @sp: command to send to the ISP
1718  *
1719  * Returns non-zero if a failure occurred, else zero.
1720  */
1721 int
1722 qla24xx_dif_start_scsi(srb_t *sp)
1723 {
1724         int                     nseg;
1725         unsigned long           flags;
1726         uint32_t                *clr_ptr;
1727         uint32_t                handle;
1728         uint16_t                cnt;
1729         uint16_t                req_cnt = 0;
1730         uint16_t                tot_dsds;
1731         uint16_t                tot_prot_dsds;
1732         uint16_t                fw_prot_opts = 0;
1733         struct req_que          *req = NULL;
1734         struct rsp_que          *rsp = NULL;
1735         struct scsi_cmnd        *cmd = GET_CMD_SP(sp);
1736         struct scsi_qla_host    *vha = sp->vha;
1737         struct qla_hw_data      *ha = vha->hw;
1738         struct cmd_type_crc_2   *cmd_pkt;
1739         uint32_t                status = 0;
1740 
1741 #define QDSS_GOT_Q_SPACE        BIT_0
1742 
1743         /* Only process protection or >16 cdb in this routine */
1744         if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
1745                 if (cmd->cmd_len <= 16)
1746                         return qla24xx_start_scsi(sp);
1747         }
1748 
1749         /* Setup device pointers. */
1750         req = vha->req;
1751         rsp = req->rsp;
1752 
1753         /* So we know we haven't pci_map'ed anything yet */
1754         tot_dsds = 0;
1755 
1756         /* Send marker if required */
1757         if (vha->marker_needed != 0) {
1758                 if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
1759                     QLA_SUCCESS)
1760                         return QLA_FUNCTION_FAILED;
1761                 vha->marker_needed = 0;
1762         }
1763 
1764         /* Acquire ring specific lock */
1765         spin_lock_irqsave(&ha->hardware_lock, flags);
1766 
1767         handle = qla2xxx_get_next_handle(req);
1768         if (handle == 0)
1769                 goto queuing_error;
1770 
1771         /* Compute number of required data segments */
1772         /* Map the sg table so we have an accurate count of sg entries needed */
1773         if (scsi_sg_count(cmd)) {
1774                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1775                     scsi_sg_count(cmd), cmd->sc_data_direction);
1776                 if (unlikely(!nseg))
1777                         goto queuing_error;
1778                 else
1779                         sp->flags |= SRB_DMA_VALID;
1780 
1781                 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1782                     (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1783                         struct qla2_sgx sgx;
1784                         uint32_t        partial;
1785 
1786                         memset(&sgx, 0, sizeof(struct qla2_sgx));
1787                         sgx.tot_bytes = scsi_bufflen(cmd);
1788                         sgx.cur_sg = scsi_sglist(cmd);
1789                         sgx.sp = sp;
1790 
1791                         nseg = 0;
1792                         while (qla24xx_get_one_block_sg(
1793                             cmd->device->sector_size, &sgx, &partial))
1794                                 nseg++;
1795                 }
1796         } else
1797                 nseg = 0;
1798 
1799         /* number of required data segments */
1800         tot_dsds = nseg;
1801 
1802         /* Compute number of required protection segments */
1803         if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
1804                 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
1805                     scsi_prot_sg_count(cmd), cmd->sc_data_direction);
1806                 if (unlikely(!nseg))
1807                         goto queuing_error;
1808                 else
1809                         sp->flags |= SRB_CRC_PROT_DMA_VALID;
1810 
1811                 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1812                     (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1813                         nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
1814                 }
1815         } else {
1816                 nseg = 0;
1817         }
1818 
1819         req_cnt = 1;
1820         /* Total Data and protection sg segment(s) */
1821         tot_prot_dsds = nseg;
1822         tot_dsds += nseg;
1823         if (req->cnt < (req_cnt + 2)) {
1824                 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1825                     RD_REG_DWORD_RELAXED(req->req_q_out);
1826                 if (req->ring_index < cnt)
1827                         req->cnt = cnt - req->ring_index;
1828                 else
1829                         req->cnt = req->length -
1830                                 (req->ring_index - cnt);
1831                 if (req->cnt < (req_cnt + 2))
1832                         goto queuing_error;
1833         }
1834 
1835         status |= QDSS_GOT_Q_SPACE;
1836 
1837         /* Build header part of command packet (excluding the OPCODE). */
1838         req->current_outstanding_cmd = handle;
1839         req->outstanding_cmds[handle] = sp;
1840         sp->handle = handle;
1841         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1842         req->cnt -= req_cnt;
1843 
1844         /* Fill-in common area */
1845         cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
1846         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1847 
1848         clr_ptr = (uint32_t *)cmd_pkt + 2;
1849         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1850 
1851         /* Set NPORT-ID and LUN number*/
1852         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1853         cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1854         cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1855         cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1856 
1857         int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1858         host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1859 
1860         /* Total Data and protection segment(s) */
1861         cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1862 
1863         /* Build IOCB segments and adjust for data protection segments */
1864         if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
1865             req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
1866                 QLA_SUCCESS)
1867                 goto queuing_error;
1868 
1869         cmd_pkt->entry_count = (uint8_t)req_cnt;
1870         /* Specify response queue number where completion should happen */
1871         cmd_pkt->entry_status = (uint8_t) rsp->id;
1872         cmd_pkt->timeout = cpu_to_le16(0);
1873         wmb();
1874 
1875         /* Adjust ring index. */
1876         req->ring_index++;
1877         if (req->ring_index == req->length) {
1878                 req->ring_index = 0;
1879                 req->ring_ptr = req->ring;
1880         } else
1881                 req->ring_ptr++;
1882 
1883         /* Set chip new ring index. */
1884         WRT_REG_DWORD(req->req_q_in, req->ring_index);
1885 
1886         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1887 
1888         return QLA_SUCCESS;
1889 
1890 queuing_error:
1891         if (status & QDSS_GOT_Q_SPACE) {
1892                 req->outstanding_cmds[handle] = NULL;
1893                 req->cnt += req_cnt;
1894         }
1895         /* Cleanup will be performed by the caller (queuecommand) */
1896 
1897         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1898         return QLA_FUNCTION_FAILED;
1899 }
1900 
1901 /**
1902  * qla2xxx_start_scsi_mq() - Send a SCSI command to the ISP
1903  * @sp: command to send to the ISP
1904  *
1905  * Returns non-zero if a failure occurred, else zero.
1906  */
1907 static int
1908 qla2xxx_start_scsi_mq(srb_t *sp)
1909 {
1910         int             nseg;
1911         unsigned long   flags;
1912         uint32_t        *clr_ptr;
1913         uint32_t        handle;
1914         struct cmd_type_7 *cmd_pkt;
1915         uint16_t        cnt;
1916         uint16_t        req_cnt;
1917         uint16_t        tot_dsds;
1918         struct req_que *req = NULL;
1919         struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1920         struct scsi_qla_host *vha = sp->fcport->vha;
1921         struct qla_hw_data *ha = vha->hw;
1922         struct qla_qpair *qpair = sp->qpair;
1923 
1924         /* Acquire qpair specific lock */
1925         spin_lock_irqsave(&qpair->qp_lock, flags);
1926 
1927         /* Setup qpair pointers */
1928         req = qpair->req;
1929 
1930         /* So we know we haven't pci_map'ed anything yet */
1931         tot_dsds = 0;
1932 
1933         /* Send marker if required */
1934         if (vha->marker_needed != 0) {
1935                 if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) !=
1936                     QLA_SUCCESS) {
1937                         spin_unlock_irqrestore(&qpair->qp_lock, flags);
1938                         return QLA_FUNCTION_FAILED;
1939                 }
1940                 vha->marker_needed = 0;
1941         }
1942 
1943         handle = qla2xxx_get_next_handle(req);
1944         if (handle == 0)
1945                 goto queuing_error;
1946 
1947         /* Map the sg table so we have an accurate count of sg entries needed */
1948         if (scsi_sg_count(cmd)) {
1949                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1950                     scsi_sg_count(cmd), cmd->sc_data_direction);
1951                 if (unlikely(!nseg))
1952                         goto queuing_error;
1953         } else
1954                 nseg = 0;
1955 
1956         tot_dsds = nseg;
1957         req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1958         if (req->cnt < (req_cnt + 2)) {
1959                 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1960                     RD_REG_DWORD_RELAXED(req->req_q_out);
1961                 if (req->ring_index < cnt)
1962                         req->cnt = cnt - req->ring_index;
1963                 else
1964                         req->cnt = req->length -
1965                                 (req->ring_index - cnt);
1966                 if (req->cnt < (req_cnt + 2))
1967                         goto queuing_error;
1968         }
1969 
1970         /* Build command packet. */
1971         req->current_outstanding_cmd = handle;
1972         req->outstanding_cmds[handle] = sp;
1973         sp->handle = handle;
1974         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1975         req->cnt -= req_cnt;
1976 
1977         cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
1978         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1979 
1980         /* Zero out remaining portion of packet. */
1981         /*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
1982         clr_ptr = (uint32_t *)cmd_pkt + 2;
1983         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1984         cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1985 
1986         /* Set NPORT-ID and LUN number*/
1987         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1988         cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1989         cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1990         cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1991         cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
1992 
1993         int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1994         host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1995 
1996         cmd_pkt->task = TSK_SIMPLE;
1997 
1998         /* Load SCSI command packet. */
1999         memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
2000         host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
2001 
2002         cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2003 
2004         /* Build IOCB segments */
2005         qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
2006 
2007         /* Set total data segment count. */
2008         cmd_pkt->entry_count = (uint8_t)req_cnt;
2009         wmb();
2010         /* Adjust ring index. */
2011         req->ring_index++;
2012         if (req->ring_index == req->length) {
2013                 req->ring_index = 0;
2014                 req->ring_ptr = req->ring;
2015         } else
2016                 req->ring_ptr++;
2017 
2018         sp->flags |= SRB_DMA_VALID;
2019 
2020         /* Set chip new ring index. */
2021         WRT_REG_DWORD(req->req_q_in, req->ring_index);
2022 
2023         spin_unlock_irqrestore(&qpair->qp_lock, flags);
2024         return QLA_SUCCESS;
2025 
2026 queuing_error:
2027         if (tot_dsds)
2028                 scsi_dma_unmap(cmd);
2029 
2030         spin_unlock_irqrestore(&qpair->qp_lock, flags);
2031 
2032         return QLA_FUNCTION_FAILED;
2033 }
2034 
2035 
2036 /**
2037  * qla2xxx_dif_start_scsi_mq() - Send a SCSI command to the ISP
2038  * @sp: command to send to the ISP
2039  *
2040  * Returns non-zero if a failure occurred, else zero.
2041  */
2042 int
2043 qla2xxx_dif_start_scsi_mq(srb_t *sp)
2044 {
2045         int                     nseg;
2046         unsigned long           flags;
2047         uint32_t                *clr_ptr;
2048         uint32_t                handle;
2049         uint16_t                cnt;
2050         uint16_t                req_cnt = 0;
2051         uint16_t                tot_dsds;
2052         uint16_t                tot_prot_dsds;
2053         uint16_t                fw_prot_opts = 0;
2054         struct req_que          *req = NULL;
2055         struct rsp_que          *rsp = NULL;
2056         struct scsi_cmnd        *cmd = GET_CMD_SP(sp);
2057         struct scsi_qla_host    *vha = sp->fcport->vha;
2058         struct qla_hw_data      *ha = vha->hw;
2059         struct cmd_type_crc_2   *cmd_pkt;
2060         uint32_t                status = 0;
2061         struct qla_qpair        *qpair = sp->qpair;
2062 
2063 #define QDSS_GOT_Q_SPACE        BIT_0
2064 
2065         /* Check for host side state */
2066         if (!qpair->online) {
2067                 cmd->result = DID_NO_CONNECT << 16;
2068                 return QLA_INTERFACE_ERROR;
2069         }
2070 
2071         if (!qpair->difdix_supported &&
2072                 scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
2073                 cmd->result = DID_NO_CONNECT << 16;
2074                 return QLA_INTERFACE_ERROR;
2075         }
2076 
2077         /* Only process protection or >16 cdb in this routine */
2078         if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
2079                 if (cmd->cmd_len <= 16)
2080                         return qla2xxx_start_scsi_mq(sp);
2081         }
2082 
2083         spin_lock_irqsave(&qpair->qp_lock, flags);
2084 
2085         /* Setup qpair pointers */
2086         rsp = qpair->rsp;
2087         req = qpair->req;
2088 
2089         /* So we know we haven't pci_map'ed anything yet */
2090         tot_dsds = 0;
2091 
2092         /* Send marker if required */
2093         if (vha->marker_needed != 0) {
2094                 if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) !=
2095                     QLA_SUCCESS) {
2096                         spin_unlock_irqrestore(&qpair->qp_lock, flags);
2097                         return QLA_FUNCTION_FAILED;
2098                 }
2099                 vha->marker_needed = 0;
2100         }
2101 
2102         handle = qla2xxx_get_next_handle(req);
2103         if (handle == 0)
2104                 goto queuing_error;
2105 
2106         /* Compute number of required data segments */
2107         /* Map the sg table so we have an accurate count of sg entries needed */
2108         if (scsi_sg_count(cmd)) {
2109                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
2110                     scsi_sg_count(cmd), cmd->sc_data_direction);
2111                 if (unlikely(!nseg))
2112                         goto queuing_error;
2113                 else
2114                         sp->flags |= SRB_DMA_VALID;
2115 
2116                 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
2117                     (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
2118                         struct qla2_sgx sgx;
2119                         uint32_t        partial;
2120 
2121                         memset(&sgx, 0, sizeof(struct qla2_sgx));
2122                         sgx.tot_bytes = scsi_bufflen(cmd);
2123                         sgx.cur_sg = scsi_sglist(cmd);
2124                         sgx.sp = sp;
2125 
2126                         nseg = 0;
2127                         while (qla24xx_get_one_block_sg(
2128                             cmd->device->sector_size, &sgx, &partial))
2129                                 nseg++;
2130                 }
2131         } else
2132                 nseg = 0;
2133 
2134         /* number of required data segments */
2135         tot_dsds = nseg;
2136 
2137         /* Compute number of required protection segments */
2138         if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
2139                 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
2140                     scsi_prot_sg_count(cmd), cmd->sc_data_direction);
2141                 if (unlikely(!nseg))
2142                         goto queuing_error;
2143                 else
2144                         sp->flags |= SRB_CRC_PROT_DMA_VALID;
2145 
2146                 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
2147                     (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
2148                         nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
2149                 }
2150         } else {
2151                 nseg = 0;
2152         }
2153 
2154         req_cnt = 1;
2155         /* Total Data and protection sg segment(s) */
2156         tot_prot_dsds = nseg;
2157         tot_dsds += nseg;
2158         if (req->cnt < (req_cnt + 2)) {
2159                 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
2160                     RD_REG_DWORD_RELAXED(req->req_q_out);
2161                 if (req->ring_index < cnt)
2162                         req->cnt = cnt - req->ring_index;
2163                 else
2164                         req->cnt = req->length -
2165                                 (req->ring_index - cnt);
2166                 if (req->cnt < (req_cnt + 2))
2167                         goto queuing_error;
2168         }
2169 
2170         status |= QDSS_GOT_Q_SPACE;
2171 
2172         /* Build header part of command packet (excluding the OPCODE). */
2173         req->current_outstanding_cmd = handle;
2174         req->outstanding_cmds[handle] = sp;
2175         sp->handle = handle;
2176         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
2177         req->cnt -= req_cnt;
2178 
2179         /* Fill-in common area */
2180         cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
2181         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2182 
2183         clr_ptr = (uint32_t *)cmd_pkt + 2;
2184         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2185 
2186         /* Set NPORT-ID and LUN number*/
2187         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2188         cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2189         cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2190         cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2191 
2192         int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2193         host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2194 
2195         /* Total Data and protection segment(s) */
2196         cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2197 
2198         /* Build IOCB segments and adjust for data protection segments */
2199         if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
2200             req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
2201                 QLA_SUCCESS)
2202                 goto queuing_error;
2203 
2204         cmd_pkt->entry_count = (uint8_t)req_cnt;
2205         cmd_pkt->timeout = cpu_to_le16(0);
2206         wmb();
2207 
2208         /* Adjust ring index. */
2209         req->ring_index++;
2210         if (req->ring_index == req->length) {
2211                 req->ring_index = 0;
2212                 req->ring_ptr = req->ring;
2213         } else
2214                 req->ring_ptr++;
2215 
2216         /* Set chip new ring index. */
2217         WRT_REG_DWORD(req->req_q_in, req->ring_index);
2218 
2219         /* Manage unprocessed RIO/ZIO commands in response queue. */
2220         if (vha->flags.process_response_queue &&
2221             rsp->ring_ptr->signature != RESPONSE_PROCESSED)
2222                 qla24xx_process_response_queue(vha, rsp);
2223 
2224         spin_unlock_irqrestore(&qpair->qp_lock, flags);
2225 
2226         return QLA_SUCCESS;
2227 
2228 queuing_error:
2229         if (status & QDSS_GOT_Q_SPACE) {
2230                 req->outstanding_cmds[handle] = NULL;
2231                 req->cnt += req_cnt;
2232         }
2233         /* Cleanup will be performed by the caller (queuecommand) */
2234 
2235         spin_unlock_irqrestore(&qpair->qp_lock, flags);
2236         return QLA_FUNCTION_FAILED;
2237 }
2238 
2239 /* Generic Control-SRB manipulation functions. */
2240 
2241 /* hardware_lock assumed to be held. */
2242 
2243 void *
2244 __qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
2245 {
2246         scsi_qla_host_t *vha = qpair->vha;
2247         struct qla_hw_data *ha = vha->hw;
2248         struct req_que *req = qpair->req;
2249         device_reg_t *reg = ISP_QUE_REG(ha, req->id);
2250         uint32_t handle;
2251         request_t *pkt;
2252         uint16_t cnt, req_cnt;
2253 
2254         pkt = NULL;
2255         req_cnt = 1;
2256         handle = 0;
2257 
2258         if (sp && (sp->type != SRB_SCSI_CMD)) {
2259                 /* Adjust entry-counts as needed. */
2260                 req_cnt = sp->iocbs;
2261         }
2262 
2263         /* Check for room on request queue. */
2264         if (req->cnt < req_cnt + 2) {
2265                 if (qpair->use_shadow_reg)
2266                         cnt = *req->out_ptr;
2267                 else if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
2268                     IS_QLA28XX(ha))
2269                         cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
2270                 else if (IS_P3P_TYPE(ha))
2271                         cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
2272                 else if (IS_FWI2_CAPABLE(ha))
2273                         cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
2274                 else if (IS_QLAFX00(ha))
2275                         cnt = RD_REG_DWORD(&reg->ispfx00.req_q_out);
2276                 else
2277                         cnt = qla2x00_debounce_register(
2278                             ISP_REQ_Q_OUT(ha, &reg->isp));
2279 
2280                 if  (req->ring_index < cnt)
2281                         req->cnt = cnt - req->ring_index;
2282                 else
2283                         req->cnt = req->length -
2284                             (req->ring_index - cnt);
2285         }
2286         if (req->cnt < req_cnt + 2)
2287                 goto queuing_error;
2288 
2289         if (sp) {
2290                 handle = qla2xxx_get_next_handle(req);
2291                 if (handle == 0) {
2292                         ql_log(ql_log_warn, vha, 0x700b,
2293                             "No room on outstanding cmd array.\n");
2294                         goto queuing_error;
2295                 }
2296 
2297                 /* Prep command array. */
2298                 req->current_outstanding_cmd = handle;
2299                 req->outstanding_cmds[handle] = sp;
2300                 sp->handle = handle;
2301         }
2302 
2303         /* Prep packet */
2304         req->cnt -= req_cnt;
2305         pkt = req->ring_ptr;
2306         memset(pkt, 0, REQUEST_ENTRY_SIZE);
2307         if (IS_QLAFX00(ha)) {
2308                 WRT_REG_BYTE((void __iomem *)&pkt->entry_count, req_cnt);
2309                 WRT_REG_WORD((void __iomem *)&pkt->handle, handle);
2310         } else {
2311                 pkt->entry_count = req_cnt;
2312                 pkt->handle = handle;
2313         }
2314 
2315         return pkt;
2316 
2317 queuing_error:
2318         qpair->tgt_counters.num_alloc_iocb_failed++;
2319         return pkt;
2320 }
2321 
2322 void *
2323 qla2x00_alloc_iocbs_ready(struct qla_qpair *qpair, srb_t *sp)
2324 {
2325         scsi_qla_host_t *vha = qpair->vha;
2326 
2327         if (qla2x00_reset_active(vha))
2328                 return NULL;
2329 
2330         return __qla2x00_alloc_iocbs(qpair, sp);
2331 }
2332 
2333 void *
2334 qla2x00_alloc_iocbs(struct scsi_qla_host *vha, srb_t *sp)
2335 {
2336         return __qla2x00_alloc_iocbs(vha->hw->base_qpair, sp);
2337 }
2338 
2339 static void
2340 qla24xx_prli_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2341 {
2342         struct srb_iocb *lio = &sp->u.iocb_cmd;
2343 
2344         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2345         logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
2346         if (lio->u.logio.flags & SRB_LOGIN_NVME_PRLI) {
2347                 logio->control_flags |= LCF_NVME_PRLI;
2348                 if (sp->vha->flags.nvme_first_burst)
2349                         logio->io_parameter[0] = NVME_PRLI_SP_FIRST_BURST;
2350         }
2351 
2352         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2353         logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2354         logio->port_id[1] = sp->fcport->d_id.b.area;
2355         logio->port_id[2] = sp->fcport->d_id.b.domain;
2356         logio->vp_index = sp->vha->vp_idx;
2357 }
2358 
2359 static void
2360 qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2361 {
2362         struct srb_iocb *lio = &sp->u.iocb_cmd;
2363 
2364         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2365         if (lio->u.logio.flags & SRB_LOGIN_PRLI_ONLY) {
2366                 logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
2367         } else {
2368                 logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
2369                 if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
2370                         logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
2371                 if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
2372                         logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
2373         }
2374         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2375         logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2376         logio->port_id[1] = sp->fcport->d_id.b.area;
2377         logio->port_id[2] = sp->fcport->d_id.b.domain;
2378         logio->vp_index = sp->vha->vp_idx;
2379 }
2380 
2381 static void
2382 qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
2383 {
2384         struct qla_hw_data *ha = sp->vha->hw;
2385         struct srb_iocb *lio = &sp->u.iocb_cmd;
2386         uint16_t opts;
2387 
2388         mbx->entry_type = MBX_IOCB_TYPE;
2389         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2390         mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
2391         opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
2392         opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
2393         if (HAS_EXTENDED_IDS(ha)) {
2394                 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2395                 mbx->mb10 = cpu_to_le16(opts);
2396         } else {
2397                 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
2398         }
2399         mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2400         mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2401             sp->fcport->d_id.b.al_pa);
2402         mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2403 }
2404 
2405 static void
2406 qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2407 {
2408         u16 control_flags = LCF_COMMAND_LOGO;
2409         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2410 
2411         if (sp->fcport->explicit_logout) {
2412                 control_flags |= LCF_EXPL_LOGO|LCF_FREE_NPORT;
2413         } else {
2414                 control_flags |= LCF_IMPL_LOGO;
2415 
2416                 if (!sp->fcport->keep_nport_handle)
2417                         control_flags |= LCF_FREE_NPORT;
2418         }
2419 
2420         logio->control_flags = cpu_to_le16(control_flags);
2421         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2422         logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2423         logio->port_id[1] = sp->fcport->d_id.b.area;
2424         logio->port_id[2] = sp->fcport->d_id.b.domain;
2425         logio->vp_index = sp->vha->vp_idx;
2426 }
2427 
2428 static void
2429 qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
2430 {
2431         struct qla_hw_data *ha = sp->vha->hw;
2432 
2433         mbx->entry_type = MBX_IOCB_TYPE;
2434         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2435         mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
2436         mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
2437             cpu_to_le16(sp->fcport->loop_id) :
2438             cpu_to_le16(sp->fcport->loop_id << 8);
2439         mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2440         mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2441             sp->fcport->d_id.b.al_pa);
2442         mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2443         /* Implicit: mbx->mbx10 = 0. */
2444 }
2445 
2446 static void
2447 qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2448 {
2449         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2450         logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
2451         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2452         logio->vp_index = sp->vha->vp_idx;
2453 }
2454 
2455 static void
2456 qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
2457 {
2458         struct qla_hw_data *ha = sp->vha->hw;
2459 
2460         mbx->entry_type = MBX_IOCB_TYPE;
2461         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2462         mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
2463         if (HAS_EXTENDED_IDS(ha)) {
2464                 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2465                 mbx->mb10 = cpu_to_le16(BIT_0);
2466         } else {
2467                 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
2468         }
2469         mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
2470         mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
2471         mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
2472         mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
2473         mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2474 }
2475 
2476 static void
2477 qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
2478 {
2479         uint32_t flags;
2480         uint64_t lun;
2481         struct fc_port *fcport = sp->fcport;
2482         scsi_qla_host_t *vha = fcport->vha;
2483         struct qla_hw_data *ha = vha->hw;
2484         struct srb_iocb *iocb = &sp->u.iocb_cmd;
2485         struct req_que *req = vha->req;
2486 
2487         flags = iocb->u.tmf.flags;
2488         lun = iocb->u.tmf.lun;
2489 
2490         tsk->entry_type = TSK_MGMT_IOCB_TYPE;
2491         tsk->entry_count = 1;
2492         tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
2493         tsk->nport_handle = cpu_to_le16(fcport->loop_id);
2494         tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
2495         tsk->control_flags = cpu_to_le32(flags);
2496         tsk->port_id[0] = fcport->d_id.b.al_pa;
2497         tsk->port_id[1] = fcport->d_id.b.area;
2498         tsk->port_id[2] = fcport->d_id.b.domain;
2499         tsk->vp_index = fcport->vha->vp_idx;
2500 
2501         if (flags == TCF_LUN_RESET) {
2502                 int_to_scsilun(lun, &tsk->lun);
2503                 host_to_fcp_swap((uint8_t *)&tsk->lun,
2504                         sizeof(tsk->lun));
2505         }
2506 }
2507 
2508 void qla2x00_init_timer(srb_t *sp, unsigned long tmo)
2509 {
2510         timer_setup(&sp->u.iocb_cmd.timer, qla2x00_sp_timeout, 0);
2511         sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ;
2512         sp->free = qla2x00_sp_free;
2513         if (IS_QLAFX00(sp->vha->hw) && sp->type == SRB_FXIOCB_DCMD)
2514                 init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp);
2515         sp->start_timer = 1;
2516 }
2517 
2518 static void qla2x00_els_dcmd_sp_free(srb_t *sp)
2519 {
2520         struct srb_iocb *elsio = &sp->u.iocb_cmd;
2521 
2522         kfree(sp->fcport);
2523 
2524         if (elsio->u.els_logo.els_logo_pyld)
2525                 dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
2526                     elsio->u.els_logo.els_logo_pyld,
2527                     elsio->u.els_logo.els_logo_pyld_dma);
2528 
2529         del_timer(&elsio->timer);
2530         qla2x00_rel_sp(sp);
2531 }
2532 
2533 static void
2534 qla2x00_els_dcmd_iocb_timeout(void *data)
2535 {
2536         srb_t *sp = data;
2537         fc_port_t *fcport = sp->fcport;
2538         struct scsi_qla_host *vha = sp->vha;
2539         struct srb_iocb *lio = &sp->u.iocb_cmd;
2540 
2541         ql_dbg(ql_dbg_io, vha, 0x3069,
2542             "%s Timeout, hdl=%x, portid=%02x%02x%02x\n",
2543             sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
2544             fcport->d_id.b.al_pa);
2545 
2546         complete(&lio->u.els_logo.comp);
2547 }
2548 
2549 static void qla2x00_els_dcmd_sp_done(srb_t *sp, int res)
2550 {
2551         fc_port_t *fcport = sp->fcport;
2552         struct srb_iocb *lio = &sp->u.iocb_cmd;
2553         struct scsi_qla_host *vha = sp->vha;
2554 
2555         ql_dbg(ql_dbg_io, vha, 0x3072,
2556             "%s hdl=%x, portid=%02x%02x%02x done\n",
2557             sp->name, sp->handle, fcport->d_id.b.domain,
2558             fcport->d_id.b.area, fcport->d_id.b.al_pa);
2559 
2560         complete(&lio->u.els_logo.comp);
2561 }
2562 
2563 int
2564 qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
2565     port_id_t remote_did)
2566 {
2567         srb_t *sp;
2568         fc_port_t *fcport = NULL;
2569         struct srb_iocb *elsio = NULL;
2570         struct qla_hw_data *ha = vha->hw;
2571         struct els_logo_payload logo_pyld;
2572         int rval = QLA_SUCCESS;
2573 
2574         fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2575         if (!fcport) {
2576                ql_log(ql_log_info, vha, 0x70e5, "fcport allocation failed\n");
2577                return -ENOMEM;
2578         }
2579 
2580         /* Alloc SRB structure */
2581         sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2582         if (!sp) {
2583                 kfree(fcport);
2584                 ql_log(ql_log_info, vha, 0x70e6,
2585                  "SRB allocation failed\n");
2586                 return -ENOMEM;
2587         }
2588 
2589         elsio = &sp->u.iocb_cmd;
2590         fcport->loop_id = 0xFFFF;
2591         fcport->d_id.b.domain = remote_did.b.domain;
2592         fcport->d_id.b.area = remote_did.b.area;
2593         fcport->d_id.b.al_pa = remote_did.b.al_pa;
2594 
2595         ql_dbg(ql_dbg_io, vha, 0x3073, "portid=%02x%02x%02x done\n",
2596             fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
2597 
2598         sp->type = SRB_ELS_DCMD;
2599         sp->name = "ELS_DCMD";
2600         sp->fcport = fcport;
2601         elsio->timeout = qla2x00_els_dcmd_iocb_timeout;
2602         qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT);
2603         init_completion(&sp->u.iocb_cmd.u.els_logo.comp);
2604         sp->done = qla2x00_els_dcmd_sp_done;
2605         sp->free = qla2x00_els_dcmd_sp_free;
2606 
2607         elsio->u.els_logo.els_logo_pyld = dma_alloc_coherent(&ha->pdev->dev,
2608                             DMA_POOL_SIZE, &elsio->u.els_logo.els_logo_pyld_dma,
2609                             GFP_KERNEL);
2610 
2611         if (!elsio->u.els_logo.els_logo_pyld) {
2612                 sp->free(sp);
2613                 return QLA_FUNCTION_FAILED;
2614         }
2615 
2616         memset(&logo_pyld, 0, sizeof(struct els_logo_payload));
2617 
2618         elsio->u.els_logo.els_cmd = els_opcode;
2619         logo_pyld.opcode = els_opcode;
2620         logo_pyld.s_id[0] = vha->d_id.b.al_pa;
2621         logo_pyld.s_id[1] = vha->d_id.b.area;
2622         logo_pyld.s_id[2] = vha->d_id.b.domain;
2623         host_to_fcp_swap(logo_pyld.s_id, sizeof(uint32_t));
2624         memcpy(&logo_pyld.wwpn, vha->port_name, WWN_SIZE);
2625 
2626         memcpy(elsio->u.els_logo.els_logo_pyld, &logo_pyld,
2627             sizeof(struct els_logo_payload));
2628 
2629         rval = qla2x00_start_sp(sp);
2630         if (rval != QLA_SUCCESS) {
2631                 sp->free(sp);
2632                 return QLA_FUNCTION_FAILED;
2633         }
2634 
2635         ql_dbg(ql_dbg_io, vha, 0x3074,
2636             "%s LOGO sent, hdl=%x, loopid=%x, portid=%02x%02x%02x.\n",
2637             sp->name, sp->handle, fcport->loop_id, fcport->d_id.b.domain,
2638             fcport->d_id.b.area, fcport->d_id.b.al_pa);
2639 
2640         wait_for_completion(&elsio->u.els_logo.comp);
2641 
2642         sp->free(sp);
2643         return rval;
2644 }
2645 
2646 static void
2647 qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2648 {
2649         scsi_qla_host_t *vha = sp->vha;
2650         struct srb_iocb *elsio = &sp->u.iocb_cmd;
2651 
2652         els_iocb->entry_type = ELS_IOCB_TYPE;
2653         els_iocb->entry_count = 1;
2654         els_iocb->sys_define = 0;
2655         els_iocb->entry_status = 0;
2656         els_iocb->handle = sp->handle;
2657         els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2658         els_iocb->tx_dsd_count = 1;
2659         els_iocb->vp_index = vha->vp_idx;
2660         els_iocb->sof_type = EST_SOFI3;
2661         els_iocb->rx_dsd_count = 0;
2662         els_iocb->opcode = elsio->u.els_logo.els_cmd;
2663 
2664         els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2665         els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2666         els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2667         /* For SID the byte order is different than DID */
2668         els_iocb->s_id[1] = vha->d_id.b.al_pa;
2669         els_iocb->s_id[2] = vha->d_id.b.area;
2670         els_iocb->s_id[0] = vha->d_id.b.domain;
2671 
2672         if (elsio->u.els_logo.els_cmd == ELS_DCMD_PLOGI) {
2673                 els_iocb->control_flags = 0;
2674                 els_iocb->tx_byte_count = els_iocb->tx_len =
2675                         cpu_to_le32(sizeof(struct els_plogi_payload));
2676                 put_unaligned_le64(elsio->u.els_plogi.els_plogi_pyld_dma,
2677                                    &els_iocb->tx_address);
2678                 els_iocb->rx_dsd_count = 1;
2679                 els_iocb->rx_byte_count = els_iocb->rx_len =
2680                         cpu_to_le32(sizeof(struct els_plogi_payload));
2681                 put_unaligned_le64(elsio->u.els_plogi.els_resp_pyld_dma,
2682                                    &els_iocb->rx_address);
2683 
2684                 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073,
2685                     "PLOGI ELS IOCB:\n");
2686                 ql_dump_buffer(ql_log_info, vha, 0x0109,
2687                     (uint8_t *)els_iocb,
2688                     sizeof(*els_iocb));
2689         } else {
2690                 els_iocb->control_flags = 1 << 13;
2691                 els_iocb->tx_byte_count =
2692                         cpu_to_le32(sizeof(struct els_logo_payload));
2693                 put_unaligned_le64(elsio->u.els_logo.els_logo_pyld_dma,
2694                                    &els_iocb->tx_address);
2695                 els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload));
2696 
2697                 els_iocb->rx_byte_count = 0;
2698                 els_iocb->rx_address = 0;
2699                 els_iocb->rx_len = 0;
2700         }
2701 
2702         sp->vha->qla_stats.control_requests++;
2703 }
2704 
2705 static void
2706 qla2x00_els_dcmd2_iocb_timeout(void *data)
2707 {
2708         srb_t *sp = data;
2709         fc_port_t *fcport = sp->fcport;
2710         struct scsi_qla_host *vha = sp->vha;
2711         struct qla_hw_data *ha = vha->hw;
2712         unsigned long flags = 0;
2713         int res;
2714 
2715         ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3069,
2716             "%s hdl=%x ELS Timeout, %8phC portid=%06x\n",
2717             sp->name, sp->handle, fcport->port_name, fcport->d_id.b24);
2718 
2719         /* Abort the exchange */
2720         spin_lock_irqsave(&ha->hardware_lock, flags);
2721         res = ha->isp_ops->abort_command(sp);
2722         ql_dbg(ql_dbg_io, vha, 0x3070,
2723             "mbx abort_command %s\n",
2724             (res == QLA_SUCCESS) ? "successful" : "failed");
2725         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2726 
2727         sp->done(sp, QLA_FUNCTION_TIMEOUT);
2728 }
2729 
2730 void qla2x00_els_dcmd2_free(scsi_qla_host_t *vha, struct els_plogi *els_plogi)
2731 {
2732         if (els_plogi->els_plogi_pyld)
2733                 dma_free_coherent(&vha->hw->pdev->dev,
2734                                   els_plogi->tx_size,
2735                                   els_plogi->els_plogi_pyld,
2736                                   els_plogi->els_plogi_pyld_dma);
2737 
2738         if (els_plogi->els_resp_pyld)
2739                 dma_free_coherent(&vha->hw->pdev->dev,
2740                                   els_plogi->rx_size,
2741                                   els_plogi->els_resp_pyld,
2742                                   els_plogi->els_resp_pyld_dma);
2743 }
2744 
2745 static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
2746 {
2747         fc_port_t *fcport = sp->fcport;
2748         struct srb_iocb *lio = &sp->u.iocb_cmd;
2749         struct scsi_qla_host *vha = sp->vha;
2750         struct event_arg ea;
2751         struct qla_work_evt *e;
2752 
2753         ql_dbg(ql_dbg_disc, vha, 0x3072,
2754             "%s ELS done rc %d hdl=%x, portid=%06x %8phC\n",
2755             sp->name, res, sp->handle, fcport->d_id.b24, fcport->port_name);
2756 
2757         fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE);
2758         del_timer(&sp->u.iocb_cmd.timer);
2759 
2760         if (sp->flags & SRB_WAKEUP_ON_COMP)
2761                 complete(&lio->u.els_plogi.comp);
2762         else {
2763                 if (res) {
2764                         set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
2765                 } else {
2766                         memset(&ea, 0, sizeof(ea));
2767                         ea.fcport = fcport;
2768                         ea.data[0] = MBS_COMMAND_COMPLETE;
2769                         ea.sp = sp;
2770                         qla24xx_handle_plogi_done_event(vha, &ea);
2771                 }
2772 
2773                 e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
2774                 if (!e) {
2775                         struct srb_iocb *elsio = &sp->u.iocb_cmd;
2776 
2777                         qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi);
2778                         sp->free(sp);
2779                         return;
2780                 }
2781                 e->u.iosb.sp = sp;
2782                 qla2x00_post_work(vha, e);
2783         }
2784 }
2785 
2786 int
2787 qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
2788     fc_port_t *fcport, bool wait)
2789 {
2790         srb_t *sp;
2791         struct srb_iocb *elsio = NULL;
2792         struct qla_hw_data *ha = vha->hw;
2793         int rval = QLA_SUCCESS;
2794         void    *ptr, *resp_ptr;
2795 
2796         /* Alloc SRB structure */
2797         sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2798         if (!sp) {
2799                 ql_log(ql_log_info, vha, 0x70e6,
2800                  "SRB allocation failed\n");
2801                 return -ENOMEM;
2802         }
2803 
2804         elsio = &sp->u.iocb_cmd;
2805         ql_dbg(ql_dbg_io, vha, 0x3073,
2806             "Enter: PLOGI portid=%06x\n", fcport->d_id.b24);
2807 
2808         fcport->flags |= FCF_ASYNC_SENT;
2809         sp->type = SRB_ELS_DCMD;
2810         sp->name = "ELS_DCMD";
2811         sp->fcport = fcport;
2812 
2813         elsio->timeout = qla2x00_els_dcmd2_iocb_timeout;
2814         init_completion(&elsio->u.els_plogi.comp);
2815         if (wait)
2816                 sp->flags = SRB_WAKEUP_ON_COMP;
2817 
2818         qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT + 2);
2819 
2820         sp->done = qla2x00_els_dcmd2_sp_done;
2821         elsio->u.els_plogi.tx_size = elsio->u.els_plogi.rx_size = DMA_POOL_SIZE;
2822 
2823         ptr = elsio->u.els_plogi.els_plogi_pyld =
2824             dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
2825                 &elsio->u.els_plogi.els_plogi_pyld_dma, GFP_KERNEL);
2826 
2827         if (!elsio->u.els_plogi.els_plogi_pyld) {
2828                 rval = QLA_FUNCTION_FAILED;
2829                 goto out;
2830         }
2831 
2832         resp_ptr = elsio->u.els_plogi.els_resp_pyld =
2833             dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
2834                 &elsio->u.els_plogi.els_resp_pyld_dma, GFP_KERNEL);
2835 
2836         if (!elsio->u.els_plogi.els_resp_pyld) {
2837                 rval = QLA_FUNCTION_FAILED;
2838                 goto out;
2839         }
2840 
2841         ql_dbg(ql_dbg_io, vha, 0x3073, "PLOGI %p %p\n", ptr, resp_ptr);
2842 
2843         memset(ptr, 0, sizeof(struct els_plogi_payload));
2844         memset(resp_ptr, 0, sizeof(struct els_plogi_payload));
2845         memcpy(elsio->u.els_plogi.els_plogi_pyld->data,
2846             &ha->plogi_els_payld.data,
2847             sizeof(elsio->u.els_plogi.els_plogi_pyld->data));
2848 
2849         elsio->u.els_plogi.els_cmd = els_opcode;
2850         elsio->u.els_plogi.els_plogi_pyld->opcode = els_opcode;
2851 
2852         ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3073, "PLOGI buffer:\n");
2853         ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x0109,
2854             (uint8_t *)elsio->u.els_plogi.els_plogi_pyld,
2855             sizeof(*elsio->u.els_plogi.els_plogi_pyld));
2856 
2857         rval = qla2x00_start_sp(sp);
2858         if (rval != QLA_SUCCESS) {
2859                 rval = QLA_FUNCTION_FAILED;
2860         } else {
2861                 ql_dbg(ql_dbg_disc, vha, 0x3074,
2862                     "%s PLOGI sent, hdl=%x, loopid=%x, to port_id %06x from port_id %06x\n",
2863                     sp->name, sp->handle, fcport->loop_id,
2864                     fcport->d_id.b24, vha->d_id.b24);
2865         }
2866 
2867         if (wait) {
2868                 wait_for_completion(&elsio->u.els_plogi.comp);
2869 
2870                 if (elsio->u.els_plogi.comp_status != CS_COMPLETE)
2871                         rval = QLA_FUNCTION_FAILED;
2872         } else {
2873                 goto done;
2874         }
2875 
2876 out:
2877         fcport->flags &= ~(FCF_ASYNC_SENT);
2878         qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi);
2879         sp->free(sp);
2880 done:
2881         return rval;
2882 }
2883 
2884 static void
2885 qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2886 {
2887         struct bsg_job *bsg_job = sp->u.bsg_job;
2888         struct fc_bsg_request *bsg_request = bsg_job->request;
2889 
2890         els_iocb->entry_type = ELS_IOCB_TYPE;
2891         els_iocb->entry_count = 1;
2892         els_iocb->sys_define = 0;
2893         els_iocb->entry_status = 0;
2894         els_iocb->handle = sp->handle;
2895         els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2896         els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
2897         els_iocb->vp_index = sp->vha->vp_idx;
2898         els_iocb->sof_type = EST_SOFI3;
2899         els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2900 
2901         els_iocb->opcode =
2902             sp->type == SRB_ELS_CMD_RPT ?
2903             bsg_request->rqst_data.r_els.els_code :
2904             bsg_request->rqst_data.h_els.command_code;
2905         els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2906         els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2907         els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2908         els_iocb->control_flags = 0;
2909         els_iocb->rx_byte_count =
2910             cpu_to_le32(bsg_job->reply_payload.payload_len);
2911         els_iocb->tx_byte_count =
2912             cpu_to_le32(bsg_job->request_payload.payload_len);
2913 
2914         put_unaligned_le64(sg_dma_address(bsg_job->request_payload.sg_list),
2915                            &els_iocb->tx_address);
2916         els_iocb->tx_len = cpu_to_le32(sg_dma_len
2917             (bsg_job->request_payload.sg_list));
2918 
2919         put_unaligned_le64(sg_dma_address(bsg_job->reply_payload.sg_list),
2920                            &els_iocb->rx_address);
2921         els_iocb->rx_len = cpu_to_le32(sg_dma_len
2922             (bsg_job->reply_payload.sg_list));
2923 
2924         sp->vha->qla_stats.control_requests++;
2925 }
2926 
2927 static void
2928 qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
2929 {
2930         uint16_t        avail_dsds;
2931         struct dsd64    *cur_dsd;
2932         struct scatterlist *sg;
2933         int index;
2934         uint16_t tot_dsds;
2935         scsi_qla_host_t *vha = sp->vha;
2936         struct qla_hw_data *ha = vha->hw;
2937         struct bsg_job *bsg_job = sp->u.bsg_job;
2938         int entry_count = 1;
2939 
2940         memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
2941         ct_iocb->entry_type = CT_IOCB_TYPE;
2942         ct_iocb->entry_status = 0;
2943         ct_iocb->handle1 = sp->handle;
2944         SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
2945         ct_iocb->status = cpu_to_le16(0);
2946         ct_iocb->control_flags = cpu_to_le16(0);
2947         ct_iocb->timeout = 0;
2948         ct_iocb->cmd_dsd_count =
2949             cpu_to_le16(bsg_job->request_payload.sg_cnt);
2950         ct_iocb->total_dsd_count =
2951             cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
2952         ct_iocb->req_bytecount =
2953             cpu_to_le32(bsg_job->request_payload.payload_len);
2954         ct_iocb->rsp_bytecount =
2955             cpu_to_le32(bsg_job->reply_payload.payload_len);
2956 
2957         put_unaligned_le64(sg_dma_address(bsg_job->request_payload.sg_list),
2958                            &ct_iocb->req_dsd.address);
2959         ct_iocb->req_dsd.length = ct_iocb->req_bytecount;
2960 
2961         put_unaligned_le64(sg_dma_address(bsg_job->reply_payload.sg_list),
2962                            &ct_iocb->rsp_dsd.address);
2963         ct_iocb->rsp_dsd.length = ct_iocb->rsp_bytecount;
2964 
2965         avail_dsds = 1;
2966         cur_dsd = &ct_iocb->rsp_dsd;
2967         index = 0;
2968         tot_dsds = bsg_job->reply_payload.sg_cnt;
2969 
2970         for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
2971                 cont_a64_entry_t *cont_pkt;
2972 
2973                 /* Allocate additional continuation packets? */
2974                 if (avail_dsds == 0) {
2975                         /*
2976                         * Five DSDs are available in the Cont.
2977                         * Type 1 IOCB.
2978                                */
2979                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2980                             vha->hw->req_q_map[0]);
2981                         cur_dsd = cont_pkt->dsd;
2982                         avail_dsds = 5;
2983                         entry_count++;
2984                 }
2985 
2986                 append_dsd64(&cur_dsd, sg);
2987                 avail_dsds--;
2988         }
2989         ct_iocb->entry_count = entry_count;
2990 
2991         sp->vha->qla_stats.control_requests++;
2992 }
2993 
2994 static void
2995 qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
2996 {
2997         uint16_t        avail_dsds;
2998         struct dsd64    *cur_dsd;
2999         struct scatterlist *sg;
3000         int index;
3001         uint16_t cmd_dsds, rsp_dsds;
3002         scsi_qla_host_t *vha = sp->vha;
3003         struct qla_hw_data *ha = vha->hw;
3004         struct bsg_job *bsg_job = sp->u.bsg_job;
3005         int entry_count = 1;
3006         cont_a64_entry_t *cont_pkt = NULL;
3007 
3008         ct_iocb->entry_type = CT_IOCB_TYPE;
3009         ct_iocb->entry_status = 0;
3010         ct_iocb->sys_define = 0;
3011         ct_iocb->handle = sp->handle;
3012 
3013         ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3014         ct_iocb->vp_index = sp->vha->vp_idx;
3015         ct_iocb->comp_status = cpu_to_le16(0);
3016 
3017         cmd_dsds = bsg_job->request_payload.sg_cnt;
3018         rsp_dsds = bsg_job->reply_payload.sg_cnt;
3019 
3020         ct_iocb->cmd_dsd_count = cpu_to_le16(cmd_dsds);
3021         ct_iocb->timeout = 0;
3022         ct_iocb->rsp_dsd_count = cpu_to_le16(rsp_dsds);
3023         ct_iocb->cmd_byte_count =
3024             cpu_to_le32(bsg_job->request_payload.payload_len);
3025 
3026         avail_dsds = 2;
3027         cur_dsd = ct_iocb->dsd;
3028         index = 0;
3029 
3030         for_each_sg(bsg_job->request_payload.sg_list, sg, cmd_dsds, index) {
3031                 /* Allocate additional continuation packets? */
3032                 if (avail_dsds == 0) {
3033                         /*
3034                          * Five DSDs are available in the Cont.
3035                          * Type 1 IOCB.
3036                          */
3037                         cont_pkt = qla2x00_prep_cont_type1_iocb(
3038                             vha, ha->req_q_map[0]);
3039                         cur_dsd = cont_pkt->dsd;
3040                         avail_dsds = 5;
3041                         entry_count++;
3042                 }
3043 
3044                 append_dsd64(&cur_dsd, sg);
3045                 avail_dsds--;
3046         }
3047 
3048         index = 0;
3049 
3050         for_each_sg(bsg_job->reply_payload.sg_list, sg, rsp_dsds, index) {
3051                 /* Allocate additional continuation packets? */
3052                 if (avail_dsds == 0) {
3053                         /*
3054                         * Five DSDs are available in the Cont.
3055                         * Type 1 IOCB.
3056                                */
3057                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
3058                             ha->req_q_map[0]);
3059                         cur_dsd = cont_pkt->dsd;
3060                         avail_dsds = 5;
3061                         entry_count++;
3062                 }
3063 
3064                 append_dsd64(&cur_dsd, sg);
3065                 avail_dsds--;
3066         }
3067         ct_iocb->entry_count = entry_count;
3068 }
3069 
3070 /*
3071  * qla82xx_start_scsi() - Send a SCSI command to the ISP
3072  * @sp: command to send to the ISP
3073  *
3074  * Returns non-zero if a failure occurred, else zero.
3075  */
3076 int
3077 qla82xx_start_scsi(srb_t *sp)
3078 {
3079         int             nseg;
3080         unsigned long   flags;
3081         struct scsi_cmnd *cmd;
3082         uint32_t        *clr_ptr;
3083         uint32_t        handle;
3084         uint16_t        cnt;
3085         uint16_t        req_cnt;
3086         uint16_t        tot_dsds;
3087         struct device_reg_82xx __iomem *reg;
3088         uint32_t dbval;
3089         uint32_t *fcp_dl;
3090         uint8_t additional_cdb_len;
3091         struct ct6_dsd *ctx;
3092         struct scsi_qla_host *vha = sp->vha;
3093         struct qla_hw_data *ha = vha->hw;
3094         struct req_que *req = NULL;
3095         struct rsp_que *rsp = NULL;
3096 
3097         /* Setup device pointers. */
3098         reg = &ha->iobase->isp82;
3099         cmd = GET_CMD_SP(sp);
3100         req = vha->req;
3101         rsp = ha->rsp_q_map[0];
3102 
3103         /* So we know we haven't pci_map'ed anything yet */
3104         tot_dsds = 0;
3105 
3106         dbval = 0x04 | (ha->portnum << 5);
3107 
3108         /* Send marker if required */
3109         if (vha->marker_needed != 0) {
3110                 if (qla2x00_marker(vha, ha->base_qpair,
3111                         0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
3112                         ql_log(ql_log_warn, vha, 0x300c,
3113                             "qla2x00_marker failed for cmd=%p.\n", cmd);
3114                         return QLA_FUNCTION_FAILED;
3115                 }
3116                 vha->marker_needed = 0;
3117         }
3118 
3119         /* Acquire ring specific lock */
3120         spin_lock_irqsave(&ha->hardware_lock, flags);
3121 
3122         handle = qla2xxx_get_next_handle(req);
3123         if (handle == 0)
3124                 goto queuing_error;
3125 
3126         /* Map the sg table so we have an accurate count of sg entries needed */
3127         if (scsi_sg_count(cmd)) {
3128                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
3129                     scsi_sg_count(cmd), cmd->sc_data_direction);
3130                 if (unlikely(!nseg))
3131                         goto queuing_error;
3132         } else
3133                 nseg = 0;
3134 
3135         tot_dsds = nseg;
3136 
3137         if (tot_dsds > ql2xshiftctondsd) {
3138                 struct cmd_type_6 *cmd_pkt;
3139                 uint16_t more_dsd_lists = 0;
3140                 struct dsd_dma *dsd_ptr;
3141                 uint16_t i;
3142 
3143                 more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
3144                 if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
3145                         ql_dbg(ql_dbg_io, vha, 0x300d,
3146                             "Num of DSD list %d is than %d for cmd=%p.\n",
3147                             more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
3148                             cmd);
3149                         goto queuing_error;
3150                 }
3151 
3152                 if (more_dsd_lists <= ha->gbl_dsd_avail)
3153                         goto sufficient_dsds;
3154                 else
3155                         more_dsd_lists -= ha->gbl_dsd_avail;
3156 
3157                 for (i = 0; i < more_dsd_lists; i++) {
3158                         dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
3159                         if (!dsd_ptr) {
3160                                 ql_log(ql_log_fatal, vha, 0x300e,
3161                                     "Failed to allocate memory for dsd_dma "
3162                                     "for cmd=%p.\n", cmd);
3163                                 goto queuing_error;
3164                         }
3165 
3166                         dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
3167                                 GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
3168                         if (!dsd_ptr->dsd_addr) {
3169                                 kfree(dsd_ptr);
3170                                 ql_log(ql_log_fatal, vha, 0x300f,
3171                                     "Failed to allocate memory for dsd_addr "
3172                                     "for cmd=%p.\n", cmd);
3173                                 goto queuing_error;
3174                         }
3175                         list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
3176                         ha->gbl_dsd_avail++;
3177                 }
3178 
3179 sufficient_dsds:
3180                 req_cnt = 1;
3181 
3182                 if (req->cnt < (req_cnt + 2)) {
3183                         cnt = (uint16_t)RD_REG_DWORD_RELAXED(
3184                                 &reg->req_q_out[0]);
3185                         if (req->ring_index < cnt)
3186                                 req->cnt = cnt - req->ring_index;
3187                         else
3188                                 req->cnt = req->length -
3189                                         (req->ring_index - cnt);
3190                         if (req->cnt < (req_cnt + 2))
3191                                 goto queuing_error;
3192                 }
3193 
3194                 ctx = sp->u.scmd.ct6_ctx =
3195                     mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
3196                 if (!ctx) {
3197                         ql_log(ql_log_fatal, vha, 0x3010,
3198                             "Failed to allocate ctx for cmd=%p.\n", cmd);
3199                         goto queuing_error;
3200                 }
3201 
3202                 memset(ctx, 0, sizeof(struct ct6_dsd));
3203                 ctx->fcp_cmnd = dma_pool_zalloc(ha->fcp_cmnd_dma_pool,
3204                         GFP_ATOMIC, &ctx->fcp_cmnd_dma);
3205                 if (!ctx->fcp_cmnd) {
3206                         ql_log(ql_log_fatal, vha, 0x3011,
3207                             "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
3208                         goto queuing_error;
3209                 }
3210 
3211                 /* Initialize the DSD list and dma handle */
3212                 INIT_LIST_HEAD(&ctx->dsd_list);
3213                 ctx->dsd_use_cnt = 0;
3214 
3215                 if (cmd->cmd_len > 16) {
3216                         additional_cdb_len = cmd->cmd_len - 16;
3217                         if ((cmd->cmd_len % 4) != 0) {
3218                                 /* SCSI command bigger than 16 bytes must be
3219                                  * multiple of 4
3220                                  */
3221                                 ql_log(ql_log_warn, vha, 0x3012,
3222                                     "scsi cmd len %d not multiple of 4 "
3223                                     "for cmd=%p.\n", cmd->cmd_len, cmd);
3224                                 goto queuing_error_fcp_cmnd;
3225                         }
3226                         ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
3227                 } else {
3228                         additional_cdb_len = 0;
3229                         ctx->fcp_cmnd_len = 12 + 16 + 4;
3230                 }
3231 
3232                 cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
3233                 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
3234 
3235                 /* Zero out remaining portion of packet. */
3236                 /*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
3237                 clr_ptr = (uint32_t *)cmd_pkt + 2;
3238                 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3239                 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
3240 
3241                 /* Set NPORT-ID and LUN number*/
3242                 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3243                 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
3244                 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
3245                 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
3246                 cmd_pkt->vp_index = sp->vha->vp_idx;
3247 
3248                 /* Build IOCB segments */
3249                 if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
3250                         goto queuing_error_fcp_cmnd;
3251 
3252                 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
3253                 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
3254 
3255                 /* build FCP_CMND IU */
3256                 int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
3257                 ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
3258 
3259                 if (cmd->sc_data_direction == DMA_TO_DEVICE)
3260                         ctx->fcp_cmnd->additional_cdb_len |= 1;
3261                 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
3262                         ctx->fcp_cmnd->additional_cdb_len |= 2;
3263 
3264                 /* Populate the FCP_PRIO. */
3265                 if (ha->flags.fcp_prio_enabled)
3266                         ctx->fcp_cmnd->task_attribute |=
3267                             sp->fcport->fcp_prio << 3;
3268 
3269                 memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
3270 
3271                 fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
3272                     additional_cdb_len);
3273                 *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
3274 
3275                 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
3276                 put_unaligned_le64(ctx->fcp_cmnd_dma,
3277                                    &cmd_pkt->fcp_cmnd_dseg_address);
3278 
3279                 sp->flags |= SRB_FCP_CMND_DMA_VALID;
3280                 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
3281                 /* Set total data segment count. */
3282                 cmd_pkt->entry_count = (uint8_t)req_cnt;
3283                 /* Specify response queue number where
3284                  * completion should happen
3285                  */
3286                 cmd_pkt->entry_status = (uint8_t) rsp->id;
3287         } else {
3288                 struct cmd_type_7 *cmd_pkt;
3289 
3290                 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
3291                 if (req->cnt < (req_cnt + 2)) {
3292                         cnt = (uint16_t)RD_REG_DWORD_RELAXED(
3293                             &reg->req_q_out[0]);
3294                         if (req->ring_index < cnt)
3295                                 req->cnt = cnt - req->ring_index;
3296                         else
3297                                 req->cnt = req->length -
3298                                         (req->ring_index - cnt);
3299                 }
3300                 if (req->cnt < (req_cnt + 2))
3301                         goto queuing_error;
3302 
3303                 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
3304                 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
3305 
3306                 /* Zero out remaining portion of packet. */
3307                 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
3308                 clr_ptr = (uint32_t *)cmd_pkt + 2;
3309                 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3310                 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
3311 
3312                 /* Set NPORT-ID and LUN number*/
3313                 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3314                 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
3315                 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
3316                 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
3317                 cmd_pkt->vp_index = sp->vha->vp_idx;
3318 
3319                 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
3320                 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
3321                     sizeof(cmd_pkt->lun));
3322 
3323                 /* Populate the FCP_PRIO. */
3324                 if (ha->flags.fcp_prio_enabled)
3325                         cmd_pkt->task |= sp->fcport->fcp_prio << 3;
3326 
3327                 /* Load SCSI command packet. */
3328                 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
3329                 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
3330 
3331                 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
3332 
3333                 /* Build IOCB segments */
3334                 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
3335 
3336                 /* Set total data segment count. */
3337                 cmd_pkt->entry_count = (uint8_t)req_cnt;
3338                 /* Specify response queue number where
3339                  * completion should happen.
3340                  */
3341                 cmd_pkt->entry_status = (uint8_t) rsp->id;
3342 
3343         }
3344         /* Build command packet. */
3345         req->current_outstanding_cmd = handle;
3346         req->outstanding_cmds[handle] = sp;
3347         sp->handle = handle;
3348         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
3349         req->cnt -= req_cnt;
3350         wmb();
3351 
3352         /* Adjust ring index. */
3353         req->ring_index++;
3354         if (req->ring_index == req->length) {
3355                 req->ring_index = 0;
3356                 req->ring_ptr = req->ring;
3357         } else
3358                 req->ring_ptr++;
3359 
3360         sp->flags |= SRB_DMA_VALID;
3361 
3362         /* Set chip new ring index. */
3363         /* write, read and verify logic */
3364         dbval = dbval | (req->id << 8) | (req->ring_index << 16);
3365         if (ql2xdbwr)
3366                 qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval);
3367         else {
3368                 WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
3369                 wmb();
3370                 while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
3371                         WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
3372                         wmb();
3373                 }
3374         }
3375 
3376         /* Manage unprocessed RIO/ZIO commands in response queue. */
3377         if (vha->flags.process_response_queue &&
3378             rsp->ring_ptr->signature != RESPONSE_PROCESSED)
3379                 qla24xx_process_response_queue(vha, rsp);
3380 
3381         spin_unlock_irqrestore(&ha->hardware_lock, flags);
3382         return QLA_SUCCESS;
3383 
3384 queuing_error_fcp_cmnd:
3385         dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
3386 queuing_error:
3387         if (tot_dsds)
3388                 scsi_dma_unmap(cmd);
3389 
3390         if (sp->u.scmd.crc_ctx) {
3391                 mempool_free(sp->u.scmd.crc_ctx, ha->ctx_mempool);
3392                 sp->u.scmd.crc_ctx = NULL;
3393         }
3394         spin_unlock_irqrestore(&ha->hardware_lock, flags);
3395 
3396         return QLA_FUNCTION_FAILED;
3397 }
3398 
3399 static void
3400 qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
3401 {
3402         struct srb_iocb *aio = &sp->u.iocb_cmd;
3403         scsi_qla_host_t *vha = sp->vha;
3404         struct req_que *req = sp->qpair->req;
3405 
3406         memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
3407         abt_iocb->entry_type = ABORT_IOCB_TYPE;
3408         abt_iocb->entry_count = 1;
3409         abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
3410         if (sp->fcport) {
3411                 abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3412                 abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
3413                 abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
3414                 abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
3415         }
3416         abt_iocb->handle_to_abort =
3417             cpu_to_le32(MAKE_HANDLE(aio->u.abt.req_que_no,
3418                                     aio->u.abt.cmd_hndl));
3419         abt_iocb->vp_index = vha->vp_idx;
3420         abt_iocb->req_que_no = cpu_to_le16(aio->u.abt.req_que_no);
3421         /* Send the command to the firmware */
3422         wmb();
3423 }
3424 
3425 static void
3426 qla2x00_mb_iocb(srb_t *sp, struct mbx_24xx_entry *mbx)
3427 {
3428         int i, sz;
3429 
3430         mbx->entry_type = MBX_IOCB_TYPE;
3431         mbx->handle = sp->handle;
3432         sz = min(ARRAY_SIZE(mbx->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.out_mb));
3433 
3434         for (i = 0; i < sz; i++)
3435                 mbx->mb[i] = cpu_to_le16(sp->u.iocb_cmd.u.mbx.out_mb[i]);
3436 }
3437 
3438 static void
3439 qla2x00_ctpthru_cmd_iocb(srb_t *sp, struct ct_entry_24xx *ct_pkt)
3440 {
3441         sp->u.iocb_cmd.u.ctarg.iocb = ct_pkt;
3442         qla24xx_prep_ms_iocb(sp->vha, &sp->u.iocb_cmd.u.ctarg);
3443         ct_pkt->handle = sp->handle;
3444 }
3445 
3446 static void qla2x00_send_notify_ack_iocb(srb_t *sp,
3447         struct nack_to_isp *nack)
3448 {
3449         struct imm_ntfy_from_isp *ntfy = sp->u.iocb_cmd.u.nack.ntfy;
3450 
3451         nack->entry_type = NOTIFY_ACK_TYPE;
3452         nack->entry_count = 1;
3453         nack->ox_id = ntfy->ox_id;
3454 
3455         nack->u.isp24.handle = sp->handle;
3456         nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
3457         if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
3458                 nack->u.isp24.flags = ntfy->u.isp24.flags &
3459                         cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
3460         }
3461         nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
3462         nack->u.isp24.status = ntfy->u.isp24.status;
3463         nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
3464         nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
3465         nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
3466         nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
3467         nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
3468         nack->u.isp24.srr_flags = 0;
3469         nack->u.isp24.srr_reject_code = 0;
3470         nack->u.isp24.srr_reject_code_expl = 0;
3471         nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
3472 }
3473 
3474 /*
3475  * Build NVME LS request
3476  */
3477 static int
3478 qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt)
3479 {
3480         struct srb_iocb *nvme;
3481         int     rval = QLA_SUCCESS;
3482 
3483         nvme = &sp->u.iocb_cmd;
3484         cmd_pkt->entry_type = PT_LS4_REQUEST;
3485         cmd_pkt->entry_count = 1;
3486         cmd_pkt->control_flags = CF_LS4_ORIGINATOR << CF_LS4_SHIFT;
3487 
3488         cmd_pkt->timeout = cpu_to_le16(nvme->u.nvme.timeout_sec);
3489         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3490         cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
3491 
3492         cmd_pkt->tx_dseg_count = 1;
3493         cmd_pkt->tx_byte_count = nvme->u.nvme.cmd_len;
3494         cmd_pkt->dsd[0].length = nvme->u.nvme.cmd_len;
3495         put_unaligned_le64(nvme->u.nvme.cmd_dma, &cmd_pkt->dsd[0].address);
3496 
3497         cmd_pkt->rx_dseg_count = 1;
3498         cmd_pkt->rx_byte_count = nvme->u.nvme.rsp_len;
3499         cmd_pkt->dsd[1].length  = nvme->u.nvme.rsp_len;
3500         put_unaligned_le64(nvme->u.nvme.rsp_dma, &cmd_pkt->dsd[1].address);
3501 
3502         return rval;
3503 }
3504 
3505 static void
3506 qla25xx_ctrlvp_iocb(srb_t *sp, struct vp_ctrl_entry_24xx *vce)
3507 {
3508         int map, pos;
3509 
3510         vce->entry_type = VP_CTRL_IOCB_TYPE;
3511         vce->handle = sp->handle;
3512         vce->entry_count = 1;
3513         vce->command = cpu_to_le16(sp->u.iocb_cmd.u.ctrlvp.cmd);
3514         vce->vp_count = cpu_to_le16(1);
3515 
3516         /*
3517          * index map in firmware starts with 1; decrement index
3518          * this is ok as we never use index 0
3519          */
3520         map = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) / 8;
3521         pos = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) & 7;
3522         vce->vp_idx_map[map] |= 1 << pos;
3523 }
3524 
3525 static void
3526 qla24xx_prlo_iocb(srb_t *sp, struct logio_entry_24xx *logio)
3527 {
3528         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
3529         logio->control_flags =
3530             cpu_to_le16(LCF_COMMAND_PRLO|LCF_IMPL_PRLO);
3531 
3532         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3533         logio->port_id[0] = sp->fcport->d_id.b.al_pa;
3534         logio->port_id[1] = sp->fcport->d_id.b.area;
3535         logio->port_id[2] = sp->fcport->d_id.b.domain;
3536         logio->vp_index = sp->fcport->vha->vp_idx;
3537 }
3538 
3539 int
3540 qla2x00_start_sp(srb_t *sp)
3541 {
3542         int rval = QLA_SUCCESS;
3543         scsi_qla_host_t *vha = sp->vha;
3544         struct qla_hw_data *ha = vha->hw;
3545         struct qla_qpair *qp = sp->qpair;
3546         void *pkt;
3547         unsigned long flags;
3548 
3549         spin_lock_irqsave(qp->qp_lock_ptr, flags);
3550         pkt = __qla2x00_alloc_iocbs(sp->qpair, sp);
3551         if (!pkt) {
3552                 rval = EAGAIN;
3553                 ql_log(ql_log_warn, vha, 0x700c,
3554                     "qla2x00_alloc_iocbs failed.\n");
3555                 goto done;
3556         }
3557 
3558         switch (sp->type) {
3559         case SRB_LOGIN_CMD:
3560                 IS_FWI2_CAPABLE(ha) ?
3561                     qla24xx_login_iocb(sp, pkt) :
3562                     qla2x00_login_iocb(sp, pkt);
3563                 break;
3564         case SRB_PRLI_CMD:
3565                 qla24xx_prli_iocb(sp, pkt);
3566                 break;
3567         case SRB_LOGOUT_CMD:
3568                 IS_FWI2_CAPABLE(ha) ?
3569                     qla24xx_logout_iocb(sp, pkt) :
3570                     qla2x00_logout_iocb(sp, pkt);
3571                 break;
3572         case SRB_ELS_CMD_RPT:
3573         case SRB_ELS_CMD_HST:
3574                 qla24xx_els_iocb(sp, pkt);
3575                 break;
3576         case SRB_CT_CMD:
3577                 IS_FWI2_CAPABLE(ha) ?
3578                     qla24xx_ct_iocb(sp, pkt) :
3579                     qla2x00_ct_iocb(sp, pkt);
3580                 break;
3581         case SRB_ADISC_CMD:
3582                 IS_FWI2_CAPABLE(ha) ?
3583                     qla24xx_adisc_iocb(sp, pkt) :
3584                     qla2x00_adisc_iocb(sp, pkt);
3585                 break;
3586         case SRB_TM_CMD:
3587                 IS_QLAFX00(ha) ?
3588                     qlafx00_tm_iocb(sp, pkt) :
3589                     qla24xx_tm_iocb(sp, pkt);
3590                 break;
3591         case SRB_FXIOCB_DCMD:
3592         case SRB_FXIOCB_BCMD:
3593                 qlafx00_fxdisc_iocb(sp, pkt);
3594                 break;
3595         case SRB_NVME_LS:
3596                 qla_nvme_ls(sp, pkt);
3597                 break;
3598         case SRB_ABT_CMD:
3599                 IS_QLAFX00(ha) ?
3600                         qlafx00_abort_iocb(sp, pkt) :
3601                         qla24xx_abort_iocb(sp, pkt);
3602                 break;
3603         case SRB_ELS_DCMD:
3604                 qla24xx_els_logo_iocb(sp, pkt);
3605                 break;
3606         case SRB_CT_PTHRU_CMD:
3607                 qla2x00_ctpthru_cmd_iocb(sp, pkt);
3608                 break;
3609         case SRB_MB_IOCB:
3610                 qla2x00_mb_iocb(sp, pkt);
3611                 break;
3612         case SRB_NACK_PLOGI:
3613         case SRB_NACK_PRLI:
3614         case SRB_NACK_LOGO:
3615                 qla2x00_send_notify_ack_iocb(sp, pkt);
3616                 break;
3617         case SRB_CTRL_VP:
3618                 qla25xx_ctrlvp_iocb(sp, pkt);
3619                 break;
3620         case SRB_PRLO_CMD:
3621                 qla24xx_prlo_iocb(sp, pkt);
3622                 break;
3623         default:
3624                 break;
3625         }
3626 
3627         if (sp->start_timer)
3628                 add_timer(&sp->u.iocb_cmd.timer);
3629 
3630         wmb();
3631         qla2x00_start_iocbs(vha, qp->req);
3632 done:
3633         spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
3634         return rval;
3635 }
3636 
3637 static void
3638 qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
3639                                 struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
3640 {
3641         uint16_t avail_dsds;
3642         struct dsd64 *cur_dsd;
3643         uint32_t req_data_len = 0;
3644         uint32_t rsp_data_len = 0;
3645         struct scatterlist *sg;
3646         int index;
3647         int entry_count = 1;
3648         struct bsg_job *bsg_job = sp->u.bsg_job;
3649 
3650         /*Update entry type to indicate bidir command */
3651         put_unaligned_le32(COMMAND_BIDIRECTIONAL, &cmd_pkt->entry_type);
3652 
3653         /* Set the transfer direction, in this set both flags
3654          * Also set the BD_WRAP_BACK flag, firmware will take care
3655          * assigning DID=SID for outgoing pkts.
3656          */
3657         cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
3658         cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
3659         cmd_pkt->control_flags = cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
3660                                                         BD_WRAP_BACK);
3661 
3662         req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
3663         cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
3664         cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
3665         cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);
3666 
3667         vha->bidi_stats.transfer_bytes += req_data_len;
3668         vha->bidi_stats.io_count++;
3669 
3670         vha->qla_stats.output_bytes += req_data_len;
3671         vha->qla_stats.output_requests++;
3672 
3673         /* Only one dsd is available for bidirectional IOCB, remaining dsds
3674          * are bundled in continuation iocb
3675          */
3676         avail_dsds = 1;
3677         cur_dsd = &cmd_pkt->fcp_dsd;
3678 
3679         index = 0;
3680 
3681         for_each_sg(bsg_job->request_payload.sg_list, sg,
3682                                 bsg_job->request_payload.sg_cnt, index) {
3683                 cont_a64_entry_t *cont_pkt;
3684 
3685                 /* Allocate additional continuation packets */
3686                 if (avail_dsds == 0) {
3687                         /* Continuation type 1 IOCB can accomodate
3688                          * 5 DSDS
3689                          */
3690                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
3691                         cur_dsd = cont_pkt->dsd;
3692                         avail_dsds = 5;
3693                         entry_count++;
3694                 }
3695                 append_dsd64(&cur_dsd, sg);
3696                 avail_dsds--;
3697         }
3698         /* For read request DSD will always goes to continuation IOCB
3699          * and follow the write DSD. If there is room on the current IOCB
3700          * then it is added to that IOCB else new continuation IOCB is
3701          * allocated.
3702          */
3703         for_each_sg(bsg_job->reply_payload.sg_list, sg,
3704                                 bsg_job->reply_payload.sg_cnt, index) {
3705                 cont_a64_entry_t *cont_pkt;
3706 
3707                 /* Allocate additional continuation packets */
3708                 if (avail_dsds == 0) {
3709                         /* Continuation type 1 IOCB can accomodate
3710                          * 5 DSDS
3711                          */
3712                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
3713                         cur_dsd = cont_pkt->dsd;
3714                         avail_dsds = 5;
3715                         entry_count++;
3716                 }
3717                 append_dsd64(&cur_dsd, sg);
3718                 avail_dsds--;
3719         }
3720         /* This value should be same as number of IOCB required for this cmd */
3721         cmd_pkt->entry_count = entry_count;
3722 }
3723 
3724 int
3725 qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
3726 {
3727 
3728         struct qla_hw_data *ha = vha->hw;
3729         unsigned long flags;
3730         uint32_t handle;
3731         uint16_t req_cnt;
3732         uint16_t cnt;
3733         uint32_t *clr_ptr;
3734         struct cmd_bidir *cmd_pkt = NULL;
3735         struct rsp_que *rsp;
3736         struct req_que *req;
3737         int rval = EXT_STATUS_OK;
3738 
3739         rval = QLA_SUCCESS;
3740 
3741         rsp = ha->rsp_q_map[0];
3742         req = vha->req;
3743 
3744         /* Send marker if required */
3745         if (vha->marker_needed != 0) {
3746                 if (qla2x00_marker(vha, ha->base_qpair,
3747                         0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
3748                         return EXT_STATUS_MAILBOX;
3749                 vha->marker_needed = 0;
3750         }
3751 
3752         /* Acquire ring specific lock */
3753         spin_lock_irqsave(&ha->hardware_lock, flags);
3754 
3755         handle = qla2xxx_get_next_handle(req);
3756         if (handle == 0) {
3757                 rval = EXT_STATUS_BUSY;
3758                 goto queuing_error;
3759         }
3760 
3761         /* Calculate number of IOCB required */
3762         req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
3763 
3764         /* Check for room on request queue. */
3765         if (req->cnt < req_cnt + 2) {
3766                 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
3767                     RD_REG_DWORD_RELAXED(req->req_q_out);
3768                 if  (req->ring_index < cnt)
3769                         req->cnt = cnt - req->ring_index;
3770                 else
3771                         req->cnt = req->length -
3772                                 (req->ring_index - cnt);
3773         }
3774         if (req->cnt < req_cnt + 2) {
3775                 rval = EXT_STATUS_BUSY;
3776                 goto queuing_error;
3777         }
3778 
3779         cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
3780         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
3781 
3782         /* Zero out remaining portion of packet. */
3783         /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
3784         clr_ptr = (uint32_t *)cmd_pkt + 2;
3785         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3786 
3787         /* Set NPORT-ID  (of vha)*/
3788         cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
3789         cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
3790         cmd_pkt->port_id[1] = vha->d_id.b.area;
3791         cmd_pkt->port_id[2] = vha->d_id.b.domain;
3792 
3793         qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
3794         cmd_pkt->entry_status = (uint8_t) rsp->id;
3795         /* Build command packet. */
3796         req->current_outstanding_cmd = handle;
3797         req->outstanding_cmds[handle] = sp;
3798         sp->handle = handle;
3799         req->cnt -= req_cnt;
3800 
3801         /* Send the command to the firmware */
3802         wmb();
3803         qla2x00_start_iocbs(vha, req);
3804 queuing_error:
3805         spin_unlock_irqrestore(&ha->hardware_lock, flags);
3806         return rval;
3807 }

/* [<][>][^][v][top][bottom][index][help] */