root/drivers/scsi/esas2r/esas2r_io.c


DEFINITIONS

This source file includes the following definitions.
  1. esas2r_start_request
  2. esas2r_local_start_request
  3. esas2r_start_vda_request
  4. esas2r_build_sg_list_sge
  5. esas2r_build_prd_iblk
  6. esas2r_build_sg_list_prd
  7. esas2r_handle_pending_reset
  8. esas2r_timer_tick
  9. esas2r_send_task_mgmt
  10. esas2r_reset_bus
  11. esas2r_ioreq_aborted

/*
 *  linux/drivers/scsi/esas2r/esas2r_io.c
 *      For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
 *
 *  Copyright (c) 2001-2013 ATTO Technology, Inc.
 *  (mailto:linuxdrivers@attotech.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * NO WARRANTY
 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
 * solely responsible for determining the appropriateness of using and
 * distributing the Program and assumes all risks associated with its
 * exercise of rights under this Agreement, including but not limited to
 * the risks and costs of program errors, damage to or loss of data,
 * programs or equipment, and unavailability or interruption of operations.
 *
 * DISCLAIMER OF LIABILITY
 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
 * USA.
 */

#include "esas2r.h"

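/*
 * Request submission overview (summarizing the flow implemented below):
 * esas2r_start_request() validates the target and either defers the request
 * (chip reset, flash, or discovery pending, or earlier requests already
 * deferred) or hands it to esas2r_local_start_request(), which places it on
 * the active list and calls esas2r_start_vda_request() to post it to the
 * controller's inbound list.
 */
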
void esas2r_start_request(struct esas2r_adapter *a, struct esas2r_request *rq)
{
	struct esas2r_target *t = NULL;
	struct esas2r_request *startrq = rq;
	unsigned long flags;

	if (unlikely(test_bit(AF_DEGRADED_MODE, &a->flags) ||
		     test_bit(AF_POWER_DOWN, &a->flags))) {
		if (rq->vrq->scsi.function == VDA_FUNC_SCSI)
			rq->req_stat = RS_SEL2;
		else
			rq->req_stat = RS_DEGRADED;
	} else if (likely(rq->vrq->scsi.function == VDA_FUNC_SCSI)) {
		t = a->targetdb + rq->target_id;

		if (unlikely(t >= a->targetdb_end
			     || !(t->flags & TF_USED))) {
			rq->req_stat = RS_SEL;
		} else {
			/* Copy in the target ID. */
			rq->vrq->scsi.target_id = cpu_to_le16(t->virt_targ_id);

			/*
			 * Test if we want to report RS_SEL for a missing
			 * target.  Note that if AF_DISC_PENDING is set, then
			 * this will go on the defer queue.
			 */
			if (unlikely(t->target_state != TS_PRESENT &&
				     !test_bit(AF_DISC_PENDING, &a->flags)))
				rq->req_stat = RS_SEL;
		}
	}

	if (unlikely(rq->req_stat != RS_PENDING)) {
		esas2r_complete_request(a, rq);
		return;
	}

	esas2r_trace("rq=%p", rq);
	esas2r_trace("rq->vrq->scsi.handle=%x", rq->vrq->scsi.handle);

	if (rq->vrq->scsi.function == VDA_FUNC_SCSI) {
		esas2r_trace("rq->target_id=%d", rq->target_id);
		esas2r_trace("rq->vrq->scsi.flags=%x", rq->vrq->scsi.flags);
	}

	spin_lock_irqsave(&a->queue_lock, flags);

	if (likely(list_empty(&a->defer_list) &&
		   !test_bit(AF_CHPRST_PENDING, &a->flags) &&
		   !test_bit(AF_FLASHING, &a->flags) &&
		   !test_bit(AF_DISC_PENDING, &a->flags)))
		esas2r_local_start_request(a, startrq);
	else
		list_add_tail(&startrq->req_list, &a->defer_list);

	spin_unlock_irqrestore(&a->queue_lock, flags);
}

/*
 * Starts the specified request.  All requests have RS_PENDING set when this
 * routine is called.  The caller is usually esas2r_start_request, but
 * esas2r_do_deferred_processes will also start requests that were deferred.
 *
 * The caller must ensure that requests can be started.
 *
 * esas2r_start_request will defer a request if there are already requests
 * waiting or there is a chip reset pending.  Once the reset condition clears,
 * esas2r_do_deferred_processes will call this function to start the request.
 *
 * When a request is started, it is placed on the active list and queued to
 * the controller.
 */
void esas2r_local_start_request(struct esas2r_adapter *a,
				struct esas2r_request *rq)
{
	esas2r_trace_enter();
	esas2r_trace("rq=%p", rq);
	esas2r_trace("rq->vrq:%p", rq->vrq);
	esas2r_trace("rq->vrq_md->phys_addr:%x", rq->vrq_md->phys_addr);

	if (unlikely(rq->vrq->scsi.function == VDA_FUNC_FLASH
		     && rq->vrq->flash.sub_func == VDA_FLASH_COMMIT))
		set_bit(AF_FLASHING, &a->flags);

	list_add_tail(&rq->req_list, &a->active_list);
	esas2r_start_vda_request(a, rq);
	esas2r_trace_exit();
}

void esas2r_start_vda_request(struct esas2r_adapter *a,
			      struct esas2r_request *rq)
{
	struct esas2r_inbound_list_source_entry *element;
	u32 dw;

	rq->req_stat = RS_STARTED;
	/*
	 * Calculate the inbound list entry location and the current state of
	 * the toggle bit.
	 */
	a->last_write++;
	if (a->last_write >= a->list_size) {
		a->last_write = 0;
		/* Update the toggle bit */
		if (test_bit(AF_COMM_LIST_TOGGLE, &a->flags))
			clear_bit(AF_COMM_LIST_TOGGLE, &a->flags);
		else
			set_bit(AF_COMM_LIST_TOGGLE, &a->flags);
	}
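
	/*
	 * The inbound list is circular; AF_COMM_LIST_TOGGLE flips on every
	 * wrap.  Presumably the firmware uses the toggle state echoed in the
	 * write pointer (MU_ILW_TOGGLE below) to distinguish freshly posted
	 * entries from stale ones after a wrap.
	 */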

	element =
		(struct esas2r_inbound_list_source_entry *)a->inbound_list_md.
		virt_addr
		+ a->last_write;

	/* Set the VDA request size if it was never modified */
	if (rq->vda_req_sz == RQ_SIZE_DEFAULT)
		rq->vda_req_sz = (u16)(a->max_vdareq_size / sizeof(u32));

	element->address = cpu_to_le64(rq->vrq_md->phys_addr);
	element->length = cpu_to_le32(rq->vda_req_sz);

	/* Update the write pointer */
	dw = a->last_write;

	if (test_bit(AF_COMM_LIST_TOGGLE, &a->flags))
		dw |= MU_ILW_TOGGLE;

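	/*
	 * Worked example (list size assumed purely for illustration): with a
	 * 256-entry list, posting entry 255 and then wrapping to entry 0
	 * flips the toggle, so the next value written below is
	 * 0 | MU_ILW_TOGGLE rather than a bare index.
	 */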
	esas2r_trace("rq->vrq->scsi.handle:%x", rq->vrq->scsi.handle);
	esas2r_trace("dw:%x", dw);
	esas2r_trace("rq->vda_req_sz:%x", rq->vda_req_sz);
	esas2r_write_register_dword(a, MU_IN_LIST_WRITE, dw);
}

/*
 * Build the scatter/gather list for an I/O request according to the
 * specifications placed in the s/g context.  The caller must initialize
 * the context prior to the initial call by calling esas2r_sgc_init().
 */
bool esas2r_build_sg_list_sge(struct esas2r_adapter *a,
			      struct esas2r_sg_context *sgc)
{
	struct esas2r_request *rq = sgc->first_req;
	union atto_vda_req *vrq = rq->vrq;

	while (sgc->length) {
		u32 rem = 0;
		u64 addr;
		u32 len;

		len = (*sgc->get_phys_addr)(sgc, &addr);

		if (unlikely(len == 0))
			return false;

		/* If current length is more than what's left, stop there */
		if (unlikely(len > sgc->length))
			len = sgc->length;

another_entry:
		/* Limit to a round number less than the maximum length */
		if (len > SGE_LEN_MAX) {
			/*
			 * Save the remainder of the split.  Whenever we limit
			 * an entry we come back around to build entries out
			 * of the leftover.  We do this to prevent multiple
			 * calls to the get_phys_addr() function for an SGE
			 * that is too large.
			 */
			rem = len - SGE_LEN_MAX;
			len = SGE_LEN_MAX;
		}
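
		/*
		 * For example (SGE_LEN_MAX value assumed here purely for
		 * illustration): if SGE_LEN_MAX were 4 MiB, a 5 MiB physical
		 * segment would be emitted as a 4 MiB SGE now, with the
		 * remaining 1 MiB handled by the goto at the bottom of the
		 * loop.
		 */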

		/* See if we need to allocate a new SGL */
		if (unlikely(sgc->sge.a64.curr > sgc->sge.a64.limit)) {
			u8 sgelen;
			struct esas2r_mem_desc *sgl;

			/*
			 * If no SGLs are available, return failure.  The
			 * caller can call us later with the current context
			 * to pick up here.
			 */
			sgl = esas2r_alloc_sgl(a);

			if (unlikely(sgl == NULL))
				return false;

			/* Calculate the length of the last SGE filled in */
			sgelen = (u8)((u8 *)sgc->sge.a64.curr
				      - (u8 *)sgc->sge.a64.last);

			/*
			 * Copy the last SGE filled in to the first entry of
			 * the new SGL to make room for the chain entry.
			 */
			memcpy(sgl->virt_addr, sgc->sge.a64.last, sgelen);

			/* Figure out the new curr pointer in the new segment */
			sgc->sge.a64.curr =
				(struct atto_vda_sge *)((u8 *)sgl->virt_addr +
							sgelen);

			/* Set the limit pointer and build the chain entry */
			sgc->sge.a64.limit =
				(struct atto_vda_sge *)((u8 *)sgl->virt_addr
							+ sgl_page_size
							- sizeof(struct
								 atto_vda_sge));
			sgc->sge.a64.last->length = cpu_to_le32(
				SGE_CHAIN | SGE_ADDR_64);
			sgc->sge.a64.last->address =
				cpu_to_le64(sgl->phys_addr);

			/*
			 * Now, if there was a previous chain entry, then
			 * update it to contain the length of this segment
			 * and size of this chain.  Otherwise this is the
			 * first SGL, so set the chain_offset in the request.
			 */
			if (sgc->sge.a64.chain) {
				sgc->sge.a64.chain->length |=
					cpu_to_le32(
						((u8 *)(sgc->sge.a64.
							last + 1)
						 - (u8 *)rq->sg_table->
						 virt_addr)
						+ sizeof(struct atto_vda_sge) *
						LOBIT(SGE_CHAIN_SZ));
			} else {
				vrq->scsi.chain_offset = (u8)
							 ((u8 *)sgc->
							  sge.a64.last -
							  (u8 *)vrq);

				/*
				 * This is the first SGL, so set the
				 * chain_offset and the VDA request size in
				 * the request.
				 */
				rq->vda_req_sz =
					(vrq->scsi.chain_offset +
					 sizeof(struct atto_vda_sge) +
					 3)
					/ sizeof(u32);
			}

			/*
			 * Remember this so when we get a new SGL filled in we
			 * can update the length of this chain entry.
			 */
			sgc->sge.a64.chain = sgc->sge.a64.last;

			/* Now link the new SGL onto the primary request. */
			list_add(&sgl->next_desc, &rq->sg_table_head);
		}
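
		/*
		 * Chaining layout, as implemented above: the most recently
		 * written SGE is copied into the head of the new SGL, and its
		 * old slot is overwritten with a chain SGE
		 * (SGE_CHAIN | SGE_ADDR_64) pointing at the new SGL's
		 * physical address, presumably so the firmware can walk the
		 * list across page boundaries.
		 */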

		/* Update last one filled in */
		sgc->sge.a64.last = sgc->sge.a64.curr;

		/* Build the new SGE and update the S/G context */
		sgc->sge.a64.curr->length = cpu_to_le32(SGE_ADDR_64 | len);
		sgc->sge.a64.curr->address = cpu_to_le64(addr);
		sgc->sge.a64.curr++;
		sgc->cur_offset += len;
		sgc->length -= len;

		/*
		 * Check if we previously split an entry.  If so we have to
		 * pick up where we left off.
		 */
		if (rem) {
			addr += len;
			len = rem;
			rem = 0;
			goto another_entry;
		}
	}

	/* Mark the end of the SGL */
	sgc->sge.a64.last->length |= cpu_to_le32(SGE_LAST);

	/*
	 * If there was a previous chain entry, update the length to indicate
	 * the length of this last segment.
	 */
	if (sgc->sge.a64.chain) {
		sgc->sge.a64.chain->length |= cpu_to_le32(
			((u8 *)(sgc->sge.a64.curr) -
			 (u8 *)rq->sg_table->virt_addr));
	} else {
		u16 reqsize;

		/*
		 * The entire VDA request was not used, so let's set
		 * the size of the VDA request to be DMA'd.
		 */
		reqsize =
			((u16)((u8 *)sgc->sge.a64.last - (u8 *)vrq)
			 + sizeof(struct atto_vda_sge) + 3) / sizeof(u32);

		/*
		 * Only update the request size if it is bigger than what is
		 * already there.  We can come in here twice for some management
		 * commands.
		 */
		if (reqsize > rq->vda_req_sz)
			rq->vda_req_sz = reqsize;
	}
	return true;
}
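
/*
 * Note (inferred from the comments above): a false return means an SGL
 * allocation or a physical address lookup failed; the caller may retry
 * later with the same, unmodified context to resume where the build left
 * off.
 */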

/*
 * Create the PRD list for each I-block consumed by the command.  This
 * routine determines how much data is required from each I-block being
 * consumed by the command.  The first and last I-blocks can be partials,
 * and all of the I-blocks in between are for a full I-block of data.
 *
 * The interleave size is used to determine the number of bytes in the 1st
 * I-block; the remaining I-blocks hold whatever remains of the transfer.
 */
static bool esas2r_build_prd_iblk(struct esas2r_adapter *a,
				  struct esas2r_sg_context *sgc)
{
	struct esas2r_request *rq = sgc->first_req;
	u64 addr;
	u32 len;
	struct esas2r_mem_desc *sgl;
	u32 numchain = 1;
	u32 rem = 0;

	while (sgc->length) {
		/* Get the next address/length pair */

		len = (*sgc->get_phys_addr)(sgc, &addr);

		if (unlikely(len == 0))
			return false;

		/* If current length is more than what's left, stop there */

		if (unlikely(len > sgc->length))
			len = sgc->length;

another_entry:
		/* Limit to a round number less than the maximum length */

		if (len > PRD_LEN_MAX) {
			/*
			 * Save the remainder of the split.  Whenever we limit
			 * an entry we come back around to build entries out
			 * of the leftover.  We do this to prevent multiple
			 * calls to the get_phys_addr() function for an SGE
			 * that is too large.
			 */
			rem = len - PRD_LEN_MAX;
			len = PRD_LEN_MAX;
		}

		/* See if we need to allocate a new SGL */
		if (sgc->sge.prd.sge_cnt == 0) {
			if (len == sgc->length) {
				/*
				 * We only have 1 PRD entry left.
				 * It can be placed where the chain
				 * entry would have gone
				 */

				/* Build the simple SGE */
				sgc->sge.prd.curr->ctl_len = cpu_to_le32(
					PRD_DATA | len);
				sgc->sge.prd.curr->address = cpu_to_le64(addr);

				/* Adjust length related fields */
				sgc->cur_offset += len;
				sgc->length -= len;

				/* We use the reserved chain entry for data */
				numchain = 0;

				break;
			}

			if (sgc->sge.prd.chain) {
				/*
				 * Record the entry count of the SGL just
				 * completed in the previous chain entry.  (The
				 * final SGL may not be full; its count is
				 * patched up after the loop.)
				 */

				sgc->sge.prd.chain->ctl_len |= cpu_to_le32(
					sgc->sge.prd.sgl_max_cnt);
			}

			/*
			 * If no SGLs are available, return failure.  The
			 * caller can call us later with the current context
			 * to pick up here.
			 */

			sgl = esas2r_alloc_sgl(a);

			if (unlikely(sgl == NULL))
				return false;

			/*
			 * Link the new SGL onto the chain.
			 * They are in reverse order.
			 */
			list_add(&sgl->next_desc, &rq->sg_table_head);

			/*
			 * An SGL was just filled in and we are starting
			 * a new SGL. Prime the chain of the ending SGL with
			 * info that points to the new SGL. The length gets
			 * filled in when the new SGL is filled or ended.
			 */

			sgc->sge.prd.chain = sgc->sge.prd.curr;

			sgc->sge.prd.chain->ctl_len = cpu_to_le32(PRD_CHAIN);
			sgc->sge.prd.chain->address =
				cpu_to_le64(sgl->phys_addr);

			/*
			 * Start a new segment.
			 * Take one away and save for chain SGE.
			 */

			sgc->sge.prd.curr =
				(struct atto_physical_region_description *)
				sgl->virt_addr;
			sgc->sge.prd.sge_cnt = sgc->sge.prd.sgl_max_cnt - 1;
		}

		sgc->sge.prd.sge_cnt--;
		/* Build the simple SGE */
		sgc->sge.prd.curr->ctl_len = cpu_to_le32(PRD_DATA | len);
		sgc->sge.prd.curr->address = cpu_to_le64(addr);

		/* Used another element.  Point to the next one */

		sgc->sge.prd.curr++;

		/* Adjust length related fields */

		sgc->cur_offset += len;
		sgc->length -= len;

		/*
		 * Check if we previously split an entry.  If so we have to
		 * pick up where we left off.
		 */

		if (rem) {
			addr += len;
			len = rem;
			rem = 0;
			goto another_entry;
		}
	}

	if (!list_empty(&rq->sg_table_head)) {
		if (sgc->sge.prd.chain) {
			sgc->sge.prd.chain->ctl_len |=
				cpu_to_le32(sgc->sge.prd.sgl_max_cnt
					    - sgc->sge.prd.sge_cnt
					    - numchain);
		}
	}

	return true;
}

bool esas2r_build_sg_list_prd(struct esas2r_adapter *a,
			      struct esas2r_sg_context *sgc)
{
	struct esas2r_request *rq = sgc->first_req;
	u32 len = sgc->length;
	struct esas2r_target *t = a->targetdb + rq->target_id;
	u8 is_i_o = 0;
	u16 reqsize;
	struct atto_physical_region_description *curr_iblk_chn;
	u8 *cdb = (u8 *)&rq->vrq->scsi.cdb[0];

	/*
	 * Extract the LBA from the command so we can determine
	 * the I-block boundary.
	 */

	if (rq->vrq->scsi.function == VDA_FUNC_SCSI
	    && t->target_state == TS_PRESENT
	    && !(t->flags & TF_PASS_THRU)) {
		u32 lbalo = 0;

		switch (rq->vrq->scsi.cdb[0]) {
		case    READ_16:
		case    WRITE_16:
		{
			lbalo =
				MAKEDWORD(MAKEWORD(cdb[9],
						   cdb[8]),
					  MAKEWORD(cdb[7],
						   cdb[6]));
			is_i_o = 1;
			break;
		}

		case    READ_12:
		case    WRITE_12:
		case    READ_10:
		case    WRITE_10:
		{
			lbalo =
				MAKEDWORD(MAKEWORD(cdb[5],
						   cdb[4]),
					  MAKEWORD(cdb[3],
						   cdb[2]));
			is_i_o = 1;
			break;
		}

		case    READ_6:
		case    WRITE_6:
		{
			lbalo =
				MAKEDWORD(MAKEWORD(cdb[3],
						   cdb[2]),
					  MAKEWORD(cdb[1] & 0x1F,
						   0));
			is_i_o = 1;
			break;
		}

		default:
			break;
		}
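
		/*
		 * Only the low 32 bits of the LBA are extracted (for
		 * READ_16/WRITE_16 the upper CDB bytes are ignored).  That is
		 * sufficient here because the I-block offset below is taken
		 * as "lbalo & (t->inter_block - 1)", which presumably assumes
		 * inter_block is a power of two and therefore depends only on
		 * the low-order LBA bits.
		 */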

		if (is_i_o) {
			u32 startlba;

			rq->vrq->scsi.iblk_cnt_prd = 0;

			/* Determine the size of the 1st I-block PRD list */
			startlba = t->inter_block - (lbalo & (t->inter_block -
							      1));
			sgc->length = startlba * t->block_size;

			/*
			 * Check if the 1st I-block chain starts at the
			 * base of an I-block.
			 */
			if ((lbalo & (t->inter_block - 1)) == 0)
				rq->flags |= RF_1ST_IBLK_BASE;

			if (sgc->length > len)
				sgc->length = len;
		} else {
			sgc->length = len;
		}
	} else {
		sgc->length = len;
	}

	/* Get our starting chain address */

	curr_iblk_chn =
		(struct atto_physical_region_description *)sgc->sge.a64.curr;

	sgc->sge.prd.sgl_max_cnt = sgl_page_size /
				   sizeof(struct
					  atto_physical_region_description);

	/* Create all of the I-block PRD lists */

	while (len) {
		sgc->sge.prd.sge_cnt = 0;
		sgc->sge.prd.chain = NULL;
		sgc->sge.prd.curr = curr_iblk_chn;

		/* Increment to the next I-block */

		len -= sgc->length;

		/* Go build the next I-block PRD list */

		if (unlikely(!esas2r_build_prd_iblk(a, sgc)))
			return false;

		curr_iblk_chn++;

		if (is_i_o) {
			rq->vrq->scsi.iblk_cnt_prd++;

			if (len > t->inter_byte)
				sgc->length = t->inter_byte;
			else
				sgc->length = len;
		}
	}
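
	/*
	 * Worked example (sizes assumed purely for illustration): with an
	 * interleave of 16 blocks and a request starting 4 blocks into an
	 * I-block, the first pass covers the 12 blocks up to the boundary,
	 * each middle pass covers t->inter_byte bytes (one full I-block),
	 * and the final pass covers whatever remains.
	 */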

	/* Figure out the size used by the VDA request */

	reqsize = ((u16)((u8 *)curr_iblk_chn - (u8 *)rq->vrq))
		  / sizeof(u32);

	/*
	 * Only update the request size if it is bigger than what is
	 * already there.  We can come in here twice for some management
	 * commands.
	 */

	if (reqsize > rq->vda_req_sz)
		rq->vda_req_sz = reqsize;

	return true;
}

static void esas2r_handle_pending_reset(struct esas2r_adapter *a, u32 currtime)
{
	u32 delta = currtime - a->chip_init_time;

	if (delta <= ESAS2R_CHPRST_WAIT_TIME) {
		/* Wait before accessing registers */
	} else if (delta >= ESAS2R_CHPRST_TIME) {
		/*
		 * The last reset failed, so try again.  Reset
		 * processing will give up after three tries.
		 */
		esas2r_local_reset_adapter(a);
	} else {
		/* We can now see if the firmware is ready */
		u32 doorbell;

		doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
		if (doorbell == 0xFFFFFFFF || !(doorbell & DRBL_FORCE_INT)) {
			esas2r_force_interrupt(a);
		} else {
			u32 ver = (doorbell & DRBL_FW_VER_MSK);

			/* The driver supports API versions 0 and 1 */
			esas2r_write_register_dword(a, MU_DOORBELL_OUT,
						    doorbell);
			if (ver == DRBL_FW_VER_0) {
				set_bit(AF_CHPRST_DETECTED, &a->flags);
				set_bit(AF_LEGACY_SGE_MODE, &a->flags);

				a->max_vdareq_size = 128;
				a->build_sgl = esas2r_build_sg_list_sge;
			} else if (ver == DRBL_FW_VER_1) {
				set_bit(AF_CHPRST_DETECTED, &a->flags);
				clear_bit(AF_LEGACY_SGE_MODE, &a->flags);

				a->max_vdareq_size = 1024;
				a->build_sgl = esas2r_build_sg_list_prd;
			} else {
				esas2r_local_reset_adapter(a);
			}
		}
	}
}
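
/*
 * Summary of the reset handshake above: within ESAS2R_CHPRST_WAIT_TIME of
 * chip init the registers are left untouched; between that and
 * ESAS2R_CHPRST_TIME the driver forces an interrupt until the firmware
 * echoes DRBL_FORCE_INT back through MU_DOORBELL_OUT, then uses the
 * doorbell's version field to select the legacy SGE builder (version 0) or
 * the PRD builder (version 1); past ESAS2R_CHPRST_TIME the reset is retried.
 */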

/* This function must be called once per timer tick */
void esas2r_timer_tick(struct esas2r_adapter *a)
{
	u32 currtime = jiffies_to_msecs(jiffies);
	u32 deltatime = currtime - a->last_tick_time;

	a->last_tick_time = currtime;

	/* Count down the uptime */
	if (a->chip_uptime &&
	    !test_bit(AF_CHPRST_PENDING, &a->flags) &&
	    !test_bit(AF_DISC_PENDING, &a->flags)) {
		if (deltatime >= a->chip_uptime)
			a->chip_uptime = 0;
		else
			a->chip_uptime -= deltatime;
	}

	if (test_bit(AF_CHPRST_PENDING, &a->flags)) {
		if (!test_bit(AF_CHPRST_NEEDED, &a->flags) &&
		    !test_bit(AF_CHPRST_DETECTED, &a->flags))
			esas2r_handle_pending_reset(a, currtime);
	} else {
		if (test_bit(AF_DISC_PENDING, &a->flags))
			esas2r_disc_check_complete(a);
		if (test_bit(AF_HEARTBEAT_ENB, &a->flags)) {
			if (test_bit(AF_HEARTBEAT, &a->flags)) {
				if ((currtime - a->heartbeat_time) >=
				    ESAS2R_HEARTBEAT_TIME) {
					clear_bit(AF_HEARTBEAT, &a->flags);
					esas2r_hdebug("heartbeat failed");
					esas2r_log(ESAS2R_LOG_CRIT,
						   "heartbeat failed");
					esas2r_bugon();
					esas2r_local_reset_adapter(a);
				}
			} else {
				set_bit(AF_HEARTBEAT, &a->flags);
				a->heartbeat_time = currtime;
				esas2r_force_interrupt(a);
			}
		}
	}
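
	/*
	 * Heartbeat mechanism, as inferred from the code above: each tick
	 * sets AF_HEARTBEAT and forces an interrupt; the interrupt service
	 * path is presumably what clears AF_HEARTBEAT.  If the flag is still
	 * set ESAS2R_HEARTBEAT_TIME later, the chip is considered hung and
	 * the adapter is reset.
	 */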

	if (atomic_read(&a->disable_cnt) == 0)
		esas2r_do_deferred_processes(a);
}

/*
 * Send the specified task management function to the target and LUN
 * specified in rqaux.  In addition, immediately abort any commands that
 * are queued but not sent to the device according to the rules specified
 * by the task management function.
 */
bool esas2r_send_task_mgmt(struct esas2r_adapter *a,
			   struct esas2r_request *rqaux, u8 task_mgt_func)
{
	u16 targetid = rqaux->target_id;
	u8 lun = (u8)le32_to_cpu(rqaux->vrq->scsi.flags);
	bool ret = false;
	struct esas2r_request *rq;
	struct list_head *next, *element;
	unsigned long flags;

	LIST_HEAD(comp_list);

	esas2r_trace_enter();
	esas2r_trace("rqaux:%p", rqaux);
	esas2r_trace("task_mgt_func:%x", task_mgt_func);
	spin_lock_irqsave(&a->queue_lock, flags);

	/* Search the defer queue looking for requests for the device */
	list_for_each_safe(element, next, &a->defer_list) {
		rq = list_entry(element, struct esas2r_request, req_list);

		if (rq->vrq->scsi.function == VDA_FUNC_SCSI
		    && rq->target_id == targetid
		    && (((u8)le32_to_cpu(rq->vrq->scsi.flags)) == lun
			|| task_mgt_func == 0x20)) { /* target reset */
			/* Found a request affected by the task management */
			if (rq->req_stat == RS_PENDING) {
				/*
				 * The request is pending or waiting.  We can
				 * safely complete the request now.
				 */
				if (esas2r_ioreq_aborted(a, rq, RS_ABORTED))
					list_add_tail(&rq->comp_list,
						      &comp_list);
			}
		}
	}

	/* Send the task management request to the firmware */
	rqaux->sense_len = 0;
	rqaux->vrq->scsi.length = 0;
	rqaux->target_id = targetid;
	rqaux->vrq->scsi.flags |= cpu_to_le32(lun);
	memset(rqaux->vrq->scsi.cdb, 0, sizeof(rqaux->vrq->scsi.cdb));
	rqaux->vrq->scsi.flags |=
		cpu_to_le32(task_mgt_func * LOBIT(FCP_CMND_TM_MASK));

	if (test_bit(AF_FLASHING, &a->flags)) {
		/* Assume success; if there are active requests, return busy */
		rqaux->req_stat = RS_SUCCESS;

		list_for_each_safe(element, next, &a->active_list) {
			rq = list_entry(element, struct esas2r_request,
					req_list);
			if (rq->vrq->scsi.function == VDA_FUNC_SCSI
			    && rq->target_id == targetid
			    && (((u8)le32_to_cpu(rq->vrq->scsi.flags)) == lun
				|| task_mgt_func == 0x20))  /* target reset */
				rqaux->req_stat = RS_BUSY;
		}

		ret = true;
	}

	spin_unlock_irqrestore(&a->queue_lock, flags);

	if (!test_bit(AF_FLASHING, &a->flags))
		esas2r_start_request(a, rqaux);

	esas2r_comp_list_drain(a, &comp_list);

	if (atomic_read(&a->disable_cnt) == 0)
		esas2r_do_deferred_processes(a);

	esas2r_trace_exit();

	return ret;
}
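
/*
 * Usage note (the 0x20 encoding is taken from the comments above; other
 * values follow whatever FCP task management encoding the mask implies):
 * esas2r_send_task_mgmt(a, rqaux, 0x20) requests a target reset and also
 * aborts deferred requests for every LUN of the target, while other task
 * management functions only abort deferred requests matching the LUN
 * carried in rqaux.
 */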

void esas2r_reset_bus(struct esas2r_adapter *a)
{
	esas2r_log(ESAS2R_LOG_INFO, "performing a bus reset");

	if (!test_bit(AF_DEGRADED_MODE, &a->flags) &&
	    !test_bit(AF_CHPRST_PENDING, &a->flags) &&
	    !test_bit(AF_DISC_PENDING, &a->flags)) {
		set_bit(AF_BUSRST_NEEDED, &a->flags);
		set_bit(AF_BUSRST_PENDING, &a->flags);
		set_bit(AF_OS_RESET, &a->flags);

		esas2r_schedule_tasklet(a);
	}
}

bool esas2r_ioreq_aborted(struct esas2r_adapter *a, struct esas2r_request *rq,
			  u8 status)
{
	esas2r_trace_enter();
	esas2r_trace("rq:%p", rq);
	list_del_init(&rq->req_list);
	if (rq->timeout > RQ_MAX_TIMEOUT) {
		/*
		 * The request timed out, but we could not abort it because a
		 * chip reset occurred.  Return busy status.
		 */
		rq->req_stat = RS_BUSY;
		esas2r_trace_exit();
		return true;
	}

	rq->req_stat = status;
	esas2r_trace_exit();
	return true;
}
